Просмотр исходного кода

Merge branch 'development' of https://github.com/visuddhinanda/mint into development

visuddhinanda 7 месяцев назад
Родитель
Commit
8ba20b619b
62 измененных файлов с 2225 добавлено и 584 удалено
  1. 1 2
      ai-translate/README.md
  2. 11 6
      ai-translate/ai_translate/__init__.py
  3. 83 55
      ai-translate/ai_translate/service.py
  4. 18 22
      ai-translate/ai_translate/worker.py
  5. 2 2
      ai-translate/config.orig.toml
  6. 6 5
      ai-translate/docker/Dockerfile
  7. 126 0
      api-v8/app/Console/Commands/TestProjectCopyTask.php
  8. 76 0
      api-v8/app/Console/Commands/TestWorkerStartProject.php
  9. 3 0
      api-v8/app/Http/Api/AiAssistantApi.php
  10. 4 1
      api-v8/app/Http/Api/AuthApi.php
  11. 2 1
      api-v8/app/Http/Api/StudioApi.php
  12. 1 1
      api-v8/app/Http/Api/UserApi.php
  13. 1 1
      api-v8/app/Http/Controllers/DictController.php
  14. 1 1
      api-v8/app/Http/Controllers/DiscussionController.php
  15. 258 0
      api-v8/app/Http/Controllers/MockOpenAIController.php
  16. 36 15
      api-v8/app/Http/Controllers/TaskStatusController.php
  17. 48 0
      api-v8/app/Http/Requests/StoreDiscussionRequest.php
  18. 1 8
      api-v8/app/Jobs/ProcessAITranslateJob.php
  19. 22 3
      api-v8/app/Models/Discussion.php
  20. 12 1
      api-v8/app/Services/AiTranslateService.php
  21. 33 0
      api-v8/app/Services/DiscussionService.php
  22. 1 8
      api-v8/config/mint.php
  23. BIN
      api-v8/public/assets/images/avatar/openai.png
  24. 5 0
      api-v8/routes/api.php
  25. 275 0
      api-v8/tests/Feature/MockOpenAIApiTest.php
  26. 1 1
      dashboard-v4/dashboard/.env.orig
  27. 4 1
      dashboard-v4/dashboard/src/components/api/task.ts
  28. 263 325
      dashboard-v4/dashboard/src/components/chat/AiChat.tsx
  29. 152 0
      dashboard-v4/dashboard/src/components/chat/MsgAssistant.tsx
  30. 29 0
      dashboard-v4/dashboard/src/components/chat/MsgContainer.tsx
  31. 23 0
      dashboard-v4/dashboard/src/components/chat/MsgError.tsx
  32. 20 0
      dashboard-v4/dashboard/src/components/chat/MsgLoading.tsx
  33. 42 0
      dashboard-v4/dashboard/src/components/chat/MsgSystem.tsx
  34. 23 0
      dashboard-v4/dashboard/src/components/chat/MsgTyping.tsx
  35. 149 0
      dashboard-v4/dashboard/src/components/chat/MsgUser.tsx
  36. 146 0
      dashboard-v4/dashboard/src/components/chat/PromptButtonGroup.tsx
  37. 1 17
      dashboard-v4/dashboard/src/components/task/Description.tsx
  38. 59 40
      dashboard-v4/dashboard/src/components/task/TaskBuilderChapter.tsx
  39. 83 0
      dashboard-v4/dashboard/src/components/task/TaskLog.tsx
  40. 12 1
      dashboard-v4/dashboard/src/components/task/TaskReader.tsx
  41. 21 26
      dashboard-v4/dashboard/src/components/task/TaskStatus.tsx
  42. 7 0
      dashboard-v4/dashboard/src/components/task/TaskStatusButton.tsx
  43. 1 0
      dashboard-v4/dashboard/src/components/template/SentEdit/SentEditMenu.tsx
  44. 5 4
      dashboard-v4/dashboard/src/locales/en-US/buttons.ts
  45. 2 0
      dashboard-v4/dashboard/src/locales/en-US/label.ts
  46. 1 0
      dashboard-v4/dashboard/src/locales/zh-Hans/buttons.ts
  47. 2 0
      dashboard-v4/dashboard/src/locales/zh-Hans/label.ts
  48. 4 25
      dashboard-v4/dashboard/src/pages/library/search/search.tsx
  49. 16 4
      deploy/mint.yml
  50. 35 0
      deploy/roles/mint-v2.1/tasks/ai-translate.yml
  51. 2 1
      deploy/roles/mint-v2.1/tasks/laravel-workers.yml
  52. 2 3
      deploy/roles/mint-v2.1/tasks/laravel.yml
  53. 35 0
      deploy/roles/mint-v2.1/tasks/openai-proxy.yml
  54. 14 0
      deploy/roles/mint-v2.1/templates/containers/ai-translate.sh.j2
  55. 0 0
      deploy/roles/mint-v2.1/templates/containers/laravel-worker.sh.j2
  56. 13 0
      deploy/roles/mint-v2.1/templates/containers/openai-proxy.sh.j2
  57. 1 1
      deploy/roles/mint-v2.1/templates/containers/shell.sh.j2
  58. 16 0
      deploy/roles/mint-v2.1/templates/v2/ai-translate.toml.j2
  59. 5 0
      deploy/roles/mint-v2.1/templates/v2/openai-proxy.json.j2
  60. 5 1
      open-ai-server/config.orig.json
  61. 1 0
      open-ai-server/src/index.js
  62. 4 2
      open-ai-server/src/server.js

+ 1 - 2
ai-translate/README.md

@@ -9,8 +9,7 @@ $ source $HOME/tmp/python3/bin/activate
 > python3 -m pip install -e .
 
 > python3 -m ai_translate -h
-> python3 -m ai_translate -d -c config.toml -n worker-us-1 -q ai_translate_us
-> python3 -m ai_translate -d -c config.toml -n worker-cn-1 -q ai_translate_cn
+> python3 -m ai_translate -d -c config.toml -n worker-us-1 -q ai_translate_v2
 
 # exit the virtual env
 > deactivate

+ 11 - 6
ai-translate/ai_translate/__init__.py

@@ -1,6 +1,8 @@
 import logging
 import tomllib
 import json
+import socket
+import os
 
 import pika
 from redis.cluster import RedisCluster
@@ -20,12 +22,14 @@ def open_redis_cluster(config):
     return (cli, config['namespace'])
 
 
-def start_consumer(context, name, config, queue, callback,proxy):
+def start_consumer(context, name, config, queue, callback, proxy):
+    HeartBeat = 3600
     logger.debug("open rabbitmq %s@%s:%d/%s with timeout %ds",
                  config['user'], config['host'], config['port'], config['virtual-host'], config['customer-timeout'])
     connection = pika.BlockingConnection(
         pika.ConnectionParameters(
             host=config['host'], port=config['port'],
+            heartbeat=HeartBeat,
             credentials=pika.PlainCredentials(
                 config['user'], config['password']),
             virtual_host=config['virtual-host']))
@@ -37,11 +41,12 @@ def start_consumer(context, name, config, queue, callback,proxy):
         handle_message(context, ch, method, properties.message_id,
                        properties.content_type, json.loads(
                            body, object_hook=SimpleNamespace),
-                       callback,proxy, config['customer-timeout'])
+                       callback, proxy, HeartBeat, name)
 
     channel.basic_consume(
-        queue=queue, on_message_callback=_callback, auto_ack=False)
+        queue=queue, on_message_callback=_callback, auto_ack=True)
 
+    name = "%s.%s.%d" % (name, socket.gethostname(), os.getpid())
     logger.info('start a consumer(%s) for queue(%s)', name, queue)
     channel.start_consuming()
 
@@ -52,9 +57,9 @@ def launch(name, queue, config_file):
         config = tomllib.load(config_fd)
         logger.debug('api-url:(%s)', config['app']['api-url'])
         redis_cli = open_redis_cluster(config['redis'])
-        openai_proxy = config['app'].get('openai-proxy', None)
+        openai_proxy = config['app'].get('openai-proxy-url', None)
         logger.debug(f'openai_proxy:({openai_proxy})')
         start_consumer(redis_cli, name,
-                       config['rabbitmq'], 
-                       queue, config['app']['api-url'], 
+                       config['rabbitmq'],
+                       queue, config['app']['api-url'],
                        openai_proxy)

+ 83 - 55
ai-translate/ai_translate/service.py

@@ -106,7 +106,7 @@ class Message:
 class AiTranslateService:
     """AI翻译服务"""
 
-    def __init__(self, redis, ch, method, api_url, openai_proxy,customer_timeout):
+    def __init__(self, redis, ch, method, api_url, openai_proxy, customer_timeout, worker_name: str):
         self.queue = 'ai_translate'
         self.model_token = None
         self.task = None
@@ -119,7 +119,8 @@ class AiTranslateService:
         self.customer_timeout = customer_timeout
         self.channel = ch
         self.maxProcessTime = 15 * 60  # 一个句子的最大处理时间
-        self.openai_proxy=openai_proxy 
+        self.openai_proxy = openai_proxy
+        self.worker_name = worker_name
 
     def process_translate(self, message_id: str, body: Message) -> bool:
         """处理翻译任务"""
@@ -130,7 +131,7 @@ class AiTranslateService:
 
         self.redis_clusters.set(
             f"{self.redis_namespace}/task/{self.task.id}/message_id", message_id)
-        pointer_key = f"{self.redis_namespace}/task/{message_id}/pointer"
+        pointer_key = f"{self.redis_namespace}/task/{self.task.id}/pointer"
         pointer = 0
 
         if self.redis_clusters.exists(pointer_key):
@@ -156,7 +157,7 @@ class AiTranslateService:
                 self.task.id,
                 'task',
                 self.task.title,
-                f'id:{message_id}',
+                f'id:{message_id} worker:{self.worker_name}',
                 None
             )
         times = [self.maxProcessTime]
@@ -187,13 +188,15 @@ class AiTranslateService:
             s_uid = self._get_sentence_id(message.sentence)
 
             # 写入句子 discussion
-            topic_children = []
-            # 任务结果
-            topic_children.append(response_llm['content'])
-            # 推理过程写入discussion
-            if response_llm.get('reasoningContent'):
-                topic_children.append(response_llm['reasoningContent'])
-            self._sentence_discussion(s_uid, message.prompt, topic_children)
+            if s_uid:
+                topic_children = []
+                # 任务结果
+                topic_children.append(response_llm['content'])
+                # 推理过程写入discussion
+                if response_llm.get('reasoningContent'):
+                    topic_children.append(response_llm['reasoningContent'])
+                self._sentence_discussion(
+                    s_uid, message.prompt, topic_children)
 
             # 修改task 完成度
             progress = self._set_task_progress(
@@ -213,20 +216,6 @@ class AiTranslateService:
             else:
                 logger.error('no task discussion root')
 
-            if i + 1 < len(body.payload):
-                self.redis_clusters.set(pointer_key, i+1)
-                # 计算本次时间和剩余时间
-                # breakpoint()
-                onceTime = int(time.time())-startAt
-                times.append(onceTime)
-                times.sort(reverse=True)
-                # 取出第一个元素
-                maxTime = times[0]
-                # 计算剩余时间
-                remain = self.customer_timeout-(int(time.time())-taskStartAt)
-                if remain < maxTime:
-                    # 时间不足
-                    raise SectionTimeout
         # 任务完成 修改任务状态为 done
         self._set_task_status(self.task.id, 'done')
         self.redis_clusters.delete(pointer_key)
@@ -279,10 +268,9 @@ class AiTranslateService:
         headers = {'Authorization': f'Bearer {token}'}
         response = requests.post(
             url, json=data, headers=headers, timeout=self.api_timeout)
-        # breakpoint()
         if not response.ok:
             logger.error(
-                f'ai-translate model log create failed: {response.json()}')
+                f'ai-translate model log create failed: {response.text}')
             return False
         return True
 
@@ -362,7 +350,7 @@ class AiTranslateService:
                         json={
                             "open_ai_url": message.model.url,
                             "api_key": message.model.key,
-                            'payload':param,
+                            'payload': param,
                         },
                         headers=headers,
                         timeout=self.llm_timeout
@@ -388,10 +376,10 @@ class AiTranslateService:
                 break
             except requests.exceptions.RequestException as e:
                 model_log_data.update({
-                    'response_headers': json.dumps(dict(e.response.request.headers), ensure_ascii=False),
+                    'request_headers': json.dumps(dict(e.response.request.headers), ensure_ascii=False),
                     'response_headers': json.dumps(dict(e.response.headers), ensure_ascii=False),
                     'status': e.response.status_code,
-                    'response_data': json.dumps(e.response.json(), ensure_ascii=False),
+                    'response_data': e.response.text,
                     'success': False
                 })
                 attempt += 1
@@ -401,6 +389,7 @@ class AiTranslateService:
                 # 某些错误不需要重试
                 if status in [400, 401, 403, 404, 422]:
                     logger.warning(f"客户端错误,不重试: {status}")
+                    self._save_model_log(self.model_token, model_log_data)
                     raise LLMFailException
 
                 # 服务器错误或网络错误可以重试
@@ -421,7 +410,6 @@ class AiTranslateService:
                     logger.error(e)
 
         ai_data = response.json()
-        # logger.debug(f'{self.queue} LLM http response: {response.json()}')
 
         response_content = ai_data['choices'][0]['message']['content']
         reasoning_content = ai_data['choices'][0]['message'].get(
@@ -491,29 +479,34 @@ class AiTranslateService:
 
     def _get_sentence_id(self, sentence: Sentence) -> str:
         """获取句子ID"""
-        url = f"{self.api_url}/v2/sentence-info/aa"
-        logger.info(f'ai translate: {url}')
-
-        params = {
-            'book': sentence.book_id,
-            'par': sentence.paragraph,
-            'start': sentence.word_start,
-            'end': sentence.word_end,
-            'channel': sentence.channel_uid
-        }
-
-        headers = {'Authorization': f'Bearer {self.model_token}'}
-        response = requests.get(
-            url, params=params, headers=headers, timeout=self.api_timeout)
-
-        if not response.json().get('ok'):
-            logger.error(f'{self.queue} sentence id error: {response.json()}')
+        try:
+            url = f"{self.api_url}/v2/sentence-info/aa"
+            logger.info(f'ai translate: {url}')
+
+            params = {
+                'book': sentence.book_id,
+                'par': sentence.paragraph,
+                'start': sentence.word_start,
+                'end': sentence.word_end,
+                'channel': sentence.channel_uid
+            }
+
+            headers = {'Authorization': f'Bearer {self.model_token}'}
+            response = requests.get(
+                url, params=params, headers=headers, timeout=self.api_timeout)
+
+            if not response.json().get('ok'):
+                logger.error(
+                    f'{self.queue} sentence id error: {response.text}')
+                return False
+
+            s_uid = response.json()['data']['id']
+            logger.debug(f"sentence id={s_uid}")
+            return s_uid
+        except Exception as e:
+            logger.error(f"error: {e}")
             return False
 
-        s_uid = response.json()['data']['id']
-        logger.debug(f"sentence id={s_uid}")
-        return s_uid
-
     def _set_task_progress(self, current: TaskProgress) -> int:
         """设置任务进度"""
         if current.total > 0:
@@ -542,7 +535,7 @@ class AiTranslateService:
 
         return progress
 
-    def handle_failed_translate(self, message_id: str, translate_data: List[Any], exception: Exception):
+    def handle_failed(self, message_id: str, message: str, exception: Exception):
         """处理失败的翻译任务"""
         try:
             # 彻底失败时的业务逻辑
@@ -551,13 +544,48 @@ class AiTranslateService:
 
             # 将故障信息写入task discussion
             if self.task_topic_id:
-                error_message = f"**处理失败ai任务时出错** 请重启任务 message id={message_id} 错误信息:{str(exception)}"
+                error_message = f"**任务处理失败** 请重启任务 \n- message id={message_id} \n- 错误信息:{message} \n- 异常:{str(exception)}"
                 d_id = self._task_discussion(
                     self.task.id,
                     'task',
-                    self.task.title,
+                    '任务处理失败',
+                    error_message,
+                    self.task_topic_id
+                )
+        except Exception as e:
+            logger.error(f'处理失败ai任务时出错: {str(e)}')
+
+    def handle_retry(self, message_id: str, message: str, exception: Exception):
+        """处理失败 需要重试"""
+        try:
+            # 失败时的业务逻辑
+            self._set_task_status(self.task.id, 'pause')
+            # 将故障信息写入task discussion
+            if self.task_topic_id:
+                error_message = f"任务处理出错 正在重试 \n- message id={message_id} \n- 错误信息:{message} \n- 异常:{str(exception)}"
+                d_id = self._task_discussion(
+                    self.task.id,
+                    'task',
+                    '任务处理出错',
                     error_message,
                     self.task_topic_id
                 )
         except Exception as e:
             logger.error(f'处理失败ai任务时出错: {str(e)}')
+
+    def handle_complete(self):
+        try:
+            # 将故障信息写入task discussion
+            if self.task_topic_id:
+                d_id = self._task_discussion(
+                    self.task.id,
+                    'task',
+                    '任务处理完成',
+                    '任务处理完成',
+                    self.task_topic_id
+                )
+        except Exception as e:
+            logger.error(f'处理任务完成时出错: {str(e)}')
+
+    def get_task_id(self) -> str:
+        return self.task.id

+ 18 - 22
ai-translate/ai_translate/worker.py

@@ -7,42 +7,38 @@ from .utils import is_stopped
 logger = logging.getLogger(__name__)
 
 
-def handle_message(redis, ch, method, id, content_type, body, api_url: str,openai_proxy:str, customer_timeout: int):
+def handle_message(redis, ch, method, id, content_type, body, api_url: str, openai_proxy: str, customer_timeout: int, worker_name: str):
     MaxRetry: int = 3
     try:
         logger.info("process message start (%s) messages", len(body.payload))
         consumer = AiTranslateService(
-            redis, ch, method, api_url,openai_proxy, customer_timeout)
+            redis, ch, method, api_url, openai_proxy, customer_timeout, worker_name)
         messages = ns_to_dataclass([body], Message)
         consumer.process_translate(id, messages[0])
-        ch.basic_ack(delivery_tag=method.delivery_tag)  # 确认消息
         logger.info(f'message {id} ack')
-    except SectionTimeout as e:
-        # 时间到了,活还没干完 NACK 并重新入队
-        logger.info(
-            f'time is not enough for complete current message id={id}. requeued')
-        ch.basic_nack(delivery_tag=method.delivery_tag, requeue=True)
+        consumer.handle_complete()
     except LLMFailException as e:
-        ch.basic_nack(delivery_tag=method.delivery_tag,
-                      requeue=False)
-        logger.warning(f'message {id} LLMFailException')
+        errMsg = f'message {id} LLM Fail'
+        logger.warning(errMsg)
+        consumer.handle_failed(id, errMsg, e)
     except Exception as e:
+        logger.error(f"error: {e}")
+        logger.exception("Exception")
         # retry
-        retryKey = f'{redis[1]}/message/retry/{id}'
+        task_id = consumer.get_task_id()
+        retryKey = f'/mq/message/retry/{task_id}'
         retry = int(redis[0].get(retryKey)
                     or 0) if redis[0].exists(retryKey) else 0
-        if retry > MaxRetry:
-            logger.warning(f'超过最大重试次数[{MaxRetry}],任务失败 id={id}')
-            # NACK 丢弃或者进入死信队列
-            ch.basic_nack(delivery_tag=method.delivery_tag,
-                          requeue=False)
+        if retry >= MaxRetry:
+            errMsg = f'超过最大重试次数[{MaxRetry}],任务失败 id={id} task={task_id}'
+            redis[0].delete(retryKey)
+            logger.warning(errMsg)
+            consumer.handle_failed(id, errMsg, e)
         else:
             retry = retry+1
             redis[0].set(retryKey, retry)
-            # NACK 并重新入队
-            logger.warning(f'消息处理错误,重新压入队列 [{retry}/{MaxRetry}]')
-            ch.basic_nack(delivery_tag=method.delivery_tag, requeue=True)
-            logger.error(f"error: {e}")
-            logger.exception("发生异常")
+            errMsg = f'消息处理错误,需要重试 [{retry}/{MaxRetry}]'
+            logger.warning(errMsg)
+            consumer.handle_retry(id, errMsg, e)
     finally:
         is_stopped()

+ 2 - 2
ai-translate/config.orig.toml

@@ -4,7 +4,7 @@ port = 5672
 user = 'www'
 password = 'change-me'
 virtual-host = 'testing'
-customer-timeout = 600
+customer-timeout = 3600
 
 [redis]
 namespace = 'testing://'
@@ -13,4 +13,4 @@ port = 6371
 
 [app]
 api-url = 'http://127.0.0.1:8000/api'
-openai-proxy = 'http://localhost:4000/api/openai'
+openai-proxy-url = 'http://localhost:4000/api/openai'

+ 6 - 5
ai-translate/docker/Dockerfile

@@ -20,16 +20,17 @@ RUN locale-gen
 RUN update-locale LANG=en_US.UTF-8
 RUN update-alternatives --set editor /usr/bin/vim.basic
 
-RUN useradd -s /bin/bash -m deploy
-RUN passwd -l deploy
-RUN echo 'deploy ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/101-deploy
-USER deploy
+# RUN useradd -s /bin/bash -m deploy
+# RUN passwd -l deploy
+# RUN echo 'deploy ALL=(ALL:ALL) NOPASSWD: ALL' > /etc/sudoers.d/101-deploy
+# USER deploy
+# ADD --chown=deploy https://bootstrap.pypa.io/get-pip.py /opt/
 
 RUN python${PYTHON_VERSION} -m venv $HOME/python3
 RUN echo 'source $HOME/python3/bin/activate' >> $HOME/.bashrc
 
 # https://pip.pypa.io/en/stable/installation/#get-pip-py
-ADD --chown=deploy https://bootstrap.pypa.io/get-pip.py /opt/
+ADD https://bootstrap.pypa.io/get-pip.py /opt/
 RUN bash -c ". $HOME/python3/bin/activate && python3 /opt/get-pip.py"
 RUN bash -c ". $HOME/python3/bin/activate && pip install pika requests redis[hiredis] openai"
 

+ 126 - 0
api-v8/app/Console/Commands/TestProjectCopyTask.php

@@ -0,0 +1,126 @@
+<?php
+
+namespace App\Console\Commands;
+
+use Illuminate\Console\Command;
+use Illuminate\Support\Facades\Log;
+use Illuminate\Support\Facades\Http;
+use Illuminate\Support\Str;
+
+class TestProjectCopyTask extends Command
+{
+    /**
+     * The name and signature of the console command.
+     * php artisan test:project.copy.task project-50 dd9bcba8-ad3f-4082-9b52-4f5f8acdbd5f visuddhinanda
+     * @var string
+     */
+    protected $signature = 'test:project.copy.task {project} {task} {studio} {--token=}';
+
+    /**
+     * The console command description.
+     *
+     * @var string
+     */
+    protected $description = '建立project 并复制task';
+
+    /**
+     * Create a new command instance.
+     *
+     * @return void
+     */
+    public function __construct()
+    {
+        parent::__construct();
+    }
+
+    /**
+     * Execute the console command.
+     *
+     * @return int
+     */
+    public function handle()
+    {
+        $appUrl = config('app.url');
+        $projectTitle = $this->argument('project');
+        $taskId = $this->argument('task');
+        $studioName = $this->argument('studio');
+        $token = $this->option('token');
+
+        // 如果 role 选项未提供(为空),提示用户输入
+        if (empty($token)) {
+            $token = $this->ask('Please enter the user token:');
+        }
+
+        $taskCount = $this->ask('Please enter the task count:');
+        $url = $appUrl . '/api/v2/project-tree';
+        $this->info('create project ' . $url);
+        $projects = array();
+        $rootId = Str::uuid();
+        $projects[] = [
+            'id' => $rootId,
+            'title' => $projectTitle,
+            'type' => "instance",
+            'parent_id' => '',
+            'weight' => 0,
+            'res_id' => $rootId,
+        ];
+        for ($i = 0; $i < $taskCount; $i++) {
+            $uid = Str::uuid();
+            $projects[] = [
+                'id' => $uid,
+                'title' => "{$projectTitle}_{$i}",
+                'type' => "instance",
+                'parent_id' => $rootId,
+                'weight' => 0,
+                'res_id' => $uid,
+            ];
+        }
+        $response = Http::withToken($token)
+            ->post($url, [
+                'studio_name' => $studioName,
+                'data' => $projects,
+            ]);
+        if ($response->failed()) {
+            $this->error('project create fail' . $response->json('message'));
+            Log::error('project create fail', ['data' => $response->body()]);
+            return 1;
+        }
+
+        $projectsData = $response->json()['data']['rows'];
+        $this->info('project :' . count($projectsData));
+        //获取task
+        $response = Http::withToken($token)
+            ->get($appUrl . '/api/v2/task/' . $taskId);
+        if ($response->failed()) {
+            $this->error('task read fail' . $response->json('message'));
+            Log::error('task read fail', ['data' => $response->body()]);
+            return 1;
+        }
+
+        //建立task
+        $task = $response->json()['data'];
+        $taskTitle = $task['title'];
+        $this->info('task title:' . $task['title']);
+        $tasks = array();
+        foreach ($projectsData as $key => $project) {
+            if ($project['isLeaf']) {
+                $task['title'] = "{$taskTitle}_{$key}";
+                $tasks[] = [
+                    'project_id' => $project['id'],
+                    'tasks' => [$task]
+                ];
+            }
+        }
+
+        $response = Http::withToken($token)
+            ->post($appUrl . '/api/v2/task-group', [
+                'data' => $tasks,
+            ]);
+        if ($response->failed()) {
+            $this->error('task create fail' . $response->json('message'));
+            Log::error('task create fail', ['data' => $response->body()]);
+            return 1;
+        }
+        return 0;
+    }
+}

+ 76 - 0
api-v8/app/Console/Commands/TestWorkerStartProject.php

@@ -0,0 +1,76 @@
+<?php
+
+namespace App\Console\Commands;
+
+use Illuminate\Console\Command;
+use Illuminate\Support\Facades\Log;
+use Illuminate\Support\Facades\Http;
+
+class TestWorkerStartProject extends Command
+{
+    /**
+     * The name and signature of the console command.
+     * php artisan test:worker.start.project 0c3d2f69-1098-428b-95db-f1183667c799
+     * @var string
+     */
+    protected $signature = 'test:worker.start.project {project} {--token=}';
+
+    /**
+     * The console command description.
+     *
+     * @var string
+     */
+    protected $description = 'Command description';
+
+    /**
+     * Create a new command instance.
+     *
+     * @return void
+     */
+    public function __construct()
+    {
+        parent::__construct();
+    }
+
+    /**
+     * Execute the console command.
+     *
+     * @return int
+     */
+    public function handle()
+    {
+        $appUrl = config('app.url');
+        $projectId = $this->argument('project');
+        $token = $this->option('token');
+        // 如果 role 选项未提供(为空),提示用户输入
+        if (empty($token)) {
+            $token = $this->ask('Please enter the user token:');
+        }
+
+        $status = $this->choice(
+            'Which framework do you prefer?',
+            ['published', 'restarted', 'stop'],
+            0 // 默认选择 Laravel(索引 0)
+        );
+
+        $response = Http::withToken($token)
+            ->get($appUrl . "/api/v2/task?view=project&project_id={$projectId}&status=all&order=order&dir=asc");
+        if ($response->failed()) {
+            $this->error('task read fail' . $response->json('message'));
+            Log::error('task read fail', ['data' => $response->body()]);
+            return 1;
+        }
+        $tasks = $response->json()['data']['rows'];
+        foreach ($tasks as $key => $task) {
+            $this->info("[{$key}]task " . $task['title'] . ' status ' . $task['status']);
+            $response = Http::withToken($token)
+                ->patch($appUrl . "/api/v2/task-status/" . $task['id'], ['status' => $status]);
+            if ($response->failed()) {
+                $this->error('task status fail' . $response->json('message'));
+                Log::error('task status fail', ['data' => $response->body()]);
+            }
+            $this->info("[{$key}]task status changed {$status}");
+        }
+        return 0;
+    }
+}

+ 3 - 0
api-v8/app/Http/Api/AiAssistantApi.php

@@ -48,6 +48,9 @@ class AiAssistantApi
                 if (strpos($user->model, $key) !== false) {
                     $logo = $value;
                     break;
+                } else if (strpos($user->url, $key) !== false) {
+                    $logo = $value;
+                    break;
                 }
             }
             $base = config('app.url') . '/assets/images/avatar/';

+ 4 - 1
api-v8/app/Http/Api/AuthApi.php

@@ -31,7 +31,10 @@ class AuthApi
                 return ['user_uid' => $jwt->uid, 'user_id' => $jwt->id];
             }
         } else if (isset($_COOKIE['user_uid'])) {
-            return ['user_uid' => $_COOKIE['user_uid'], 'user_id' => $_COOKIE['user_id']];
+            return [
+                'user_uid' => $_COOKIE['user_uid'],
+                'user_id' => $_COOKIE['user_id']
+            ];
         } else {
             return false;
         }

+ 2 - 1
api-v8/app/Http/Api/StudioApi.php

@@ -52,7 +52,8 @@ class StudioApi
             }
             if ($userInfo->avatar) {
                 $img = str_replace('.jpg', '_s.jpg', $userInfo->avatar);
-                if (App::environment('local')) {
+
+                if (App::environment(['local', 'testing'])) {
                     $data['avatar'] = Storage::url($img);
                 } else {
                     $data['avatar'] = Storage::temporaryUrl($img, now()->addDays(6));

+ 1 - 1
api-v8/app/Http/Api/UserApi.php

@@ -90,7 +90,7 @@ class UserApi
         }
         if ($user->avatar) {
             $img = str_replace('.jpg', '_s.jpg', $user->avatar);
-            if (App::environment('local')) {
+            if (App::environment(['local', 'testing'])) {
                 $data['avatar'] = Storage::url($img);
             } else {
                 $data['avatar'] = Storage::temporaryUrl($img, now()->addDays(6));

+ 1 - 1
api-v8/app/Http/Controllers/DictController.php

@@ -183,7 +183,7 @@ class DictController extends Controller
             }
         }
 
-        if ($resultCount < 2) {
+        if ($resultCount < 2 && $request->has('content')) {
             //查询内文
             $wordDataOutput = [];
             $table = UserDict::select($indexCol)

+ 1 - 1
api-v8/app/Http/Controllers/DiscussionController.php

@@ -210,7 +210,7 @@ class DiscussionController extends Controller
 
         $table = $table->orderBy($request->get('order', 'created_at'), $request->get('dir', 'desc'));
         $table = $table->skip($request->get("offset", 0))
-            ->take($request->get('limit', 1000));
+            ->take($request->get('limit', 100));
 
         $result = $table->get();
 

+ 258 - 0
api-v8/app/Http/Controllers/MockOpenAIController.php

@@ -0,0 +1,258 @@
+<?php
+
+namespace App\Http\Controllers;
+
+use Illuminate\Http\Request;
+use Illuminate\Http\JsonResponse;
+use Illuminate\Support\Str;
+
+class MockOpenAIController extends Controller
+{
+    /**
+     * 模拟 Chat Completions API
+     */
+    public function chatCompletions(Request $request): JsonResponse
+    {
+        // 随机延迟
+        $this->randomDelay($request->query('delay', 'h'));
+
+        // 随机返回错误
+        if ($errorResponse = $this->randomError($request->query('error', "h"))) {
+            return $errorResponse;
+        }
+
+        $model = $request->input('model', 'gpt-3.5-turbo');
+        $messages = $request->input('messages', []);
+
+        return response()->json([
+            'id' => 'chatcmpl-' . Str::random(29),
+            'object' => 'chat.completion',
+            'created' => time(),
+            'model' => $model,
+            'choices' => [
+                [
+                    'index' => 0,
+                    'message' => [
+                        'role' => 'assistant',
+                        'content' => $this->generateMockResponse($messages)
+                    ],
+                    'finish_reason' => 'stop'
+                ]
+            ],
+            'usage' => [
+                'prompt_tokens' => rand(10, 100),
+                'completion_tokens' => rand(20, 200),
+                'total_tokens' => rand(30, 300)
+            ]
+        ]);
+    }
+
+    /**
+     * 模拟 Completions API
+     */
+    public function completions(Request $request): JsonResponse
+    {
+        // 随机延迟
+        $this->randomDelay($request->query('delay', 'h'));
+
+        // 随机返回错误
+        if ($errorResponse = $this->randomError($request->query('error', "h"))) {
+            return $errorResponse;
+        }
+
+        $model = $request->input('model', 'text-davinci-003');
+        $prompt = $request->input('prompt', '');
+
+        return response()->json([
+            'id' => 'cmpl-' . Str::random(29),
+            'object' => 'text_completion',
+            'created' => time(),
+            'model' => $model,
+            'choices' => [
+                [
+                    'text' => $this->generateMockTextResponse($prompt),
+                    'index' => 0,
+                    'logprobs' => null,
+                    'finish_reason' => 'stop'
+                ]
+            ],
+            'usage' => [
+                'prompt_tokens' => rand(10, 100),
+                'completion_tokens' => rand(20, 200),
+                'total_tokens' => rand(30, 300)
+            ]
+        ]);
+    }
+
+    /**
+     * 模拟 Models API
+     */
+    public function models(Request $request): JsonResponse
+    {
+
+        return response()->json([
+            'object' => 'list',
+            'data' => [
+                [
+                    'id' => 'gpt-4',
+                    'object' => 'model',
+                    'created' => 1687882411,
+                    'owned_by' => 'openai'
+                ],
+                [
+                    'id' => 'gpt-3.5-turbo',
+                    'object' => 'model',
+                    'created' => 1677610602,
+                    'owned_by' => 'openai'
+                ],
+                [
+                    'id' => 'text-davinci-003',
+                    'object' => 'model',
+                    'created' => 1669599635,
+                    'owned_by' => 'openai-internal'
+                ]
+            ]
+        ]);
+    }
+
+    /**
+     * 随机延迟
+     */
+    private function randomDelay(string $level): void
+    {
+        switch ($level) {
+            case 'l':
+                sleep(1);
+                break;
+            case 'm':
+                sleep(rand(1, 3));
+                break;
+            case 'h':
+                // 90% 概率 1-3秒延迟
+                // 10% 概率 60-100秒延迟
+                if (rand(1, 100) <= 10) {
+                    sleep(rand(60, 100));
+                } else {
+                    sleep(rand(1, 3));
+                }
+                break;
+            default:
+                break;
+        }
+    }
+
+    /**
+     * 随机返回错误响应
+     */
+    private function randomError(string $level): ?JsonResponse
+    {
+        switch ($level) {
+            case 'l':
+                if (rand(1, 100) <= 10) {
+                    return $this->rateLimitError();
+                }
+                break;
+            case 'm':
+                if (rand(1, 100) <= 20) {
+                    return $this->rateLimitError();
+                }
+                break;
+            case 'h':
+                // 20% 概率返回三种错误
+                if (rand(1, 100) <= 20) {
+                    $errorType = rand(1, 3);
+                    switch ($errorType) {
+                        case 1:
+                            return $this->badRequestError();
+                        case 2:
+                            return $this->internalServerError();
+                        case 3:
+                            return $this->rateLimitError();
+                    }
+                }
+                break;
+            default:
+                return null;
+        }
+        return null;
+    }
+
+    /**
+     * 400 错误响应
+     */
+    private function badRequestError(): JsonResponse
+    {
+        return response()->json([
+            'error' => [
+                'message' => 'Invalid request: missing required parameter',
+                'type' => 'invalid_request_error',
+                'param' => null,
+                'code' => null
+            ]
+        ], 400);
+    }
+
+    /**
+     * 500 错误响应
+     */
+    private function internalServerError(): JsonResponse
+    {
+        return response()->json([
+            'error' => [
+                'message' => 'The server had an error while processing your request. Sorry about that!',
+                'type' => 'server_error',
+                'param' => null,
+                'code' => null
+            ]
+        ], 500);
+    }
+
+    /**
+     * 429 限流错误响应
+     */
+    private function rateLimitError(): JsonResponse
+    {
+        return response()->json([
+            'error' => [
+                'message' => 'Rate limit reached for requests',
+                'type' => 'requests',
+                'param' => null,
+                'code' => 'rate_limit_exceeded'
+            ]
+        ], 429);
+    }
+
+    /**
+     * 生成模拟聊天响应
+     */
+    private function generateMockResponse(array $messages): string
+    {
+        $responses = [
+            "这是一个模拟的AI响应。我正在模拟OpenAI的API服务器。",
+            "感谢您的问题!这是一个测试响应,用于模拟真实的AI助手。",
+            "我是一个模拟的AI助手。您的请求已被处理,这是模拟生成的回复。",
+            "模拟Hello! This is a mock response from the simulated OpenAI API server.",
+            "模拟Thank you for your message. This is a simulated response for testing purposes.",
+            "模拟I understand your question. This is a mock reply generated by the test API server.",
+        ];
+
+        return $responses[array_rand($responses)] . " (响应时间: " . date('Y-m-d H:i:s') . ")";
+    }
+
+    /**
+     * 生成模拟文本补全响应
+     */
+    private function generateMockTextResponse(string $prompt): string
+    {
+        $responses = [
+            " 这是对您提示的模拟补全回复。",
+            " Mock completion response for your prompt.",
+            " 模拟的文本补全结果,用于测试目的。",
+            " This is a simulated text completion.",
+            " 基于您的输入生成的模拟响应。",
+        ];
+
+        return $responses[array_rand($responses)];
+    }
+}

+ 36 - 15
api-v8/app/Http/Controllers/TaskStatusController.php

@@ -4,16 +4,18 @@ declare(strict_types=1);
 
 namespace App\Http\Controllers;
 
+use Illuminate\Http\Request;
+use Illuminate\Support\Facades\Log;
+
 use App\Models\Task;
 use App\Models\TaskRelation;
 use App\Models\TaskAssignee;
-use Illuminate\Http\Request;
-use Illuminate\Support\Facades\Log;
+use App\Models\AiModel;
 use App\Http\Resources\TaskResource;
 use App\Http\Api\AuthApi;
 use App\Http\Api\WatchApi;
-use App\Models\AiModel;
-use App\Services\AiTranslateService;
+use App\Http\Api\UserApi;
+
 
 
 class TaskStatusController extends Controller
@@ -126,6 +128,9 @@ class TaskStatusController extends Controller
             case 'restarted':
                 $this->pushChange('restarted', $task->id);
                 break;
+            case 'quit':
+                $this->pushChange('quit', $task->id);
+                break;
             case 'done':
                 $this->pushChange('done', $task->id);
                 $task->finished_at = now();
@@ -190,8 +195,6 @@ class TaskStatusController extends Controller
             if ($aiAssistant) {
                 $aiTask = Task::find($taskId);
                 try {
-                    //$ai = app(AiTranslateService::class);
-                    //$params = $ai->makeByTask($taskId, $aiAssistant->uid, true);
                     \App\Jobs\ProcessAITranslateJob::publish($taskId, $aiAssistant->uid);
                     $aiTask->executor_id = $aiAssistant->uid;
                     $aiTask->status = 'queue';
@@ -209,6 +212,7 @@ class TaskStatusController extends Controller
         }
 
         $allChanged = [];
+        $discussion = app(\App\Services\DiscussionService::class);
         foreach ($this->changeTasks as $key => $tasksId) {
             $allChanged = array_merge($allChanged, $tasksId);
             #change status in related
@@ -226,17 +230,34 @@ class TaskStatusController extends Controller
             if ($key === 'restart') {
                 $data['finished_at'] = null;
             }
+            if ($key === 'quit') {
+                $data['executor_id'] = null;
+            }
             Task::whereIn('id', $tasksId)
                 ->update($data);
-            //发送站内信
-            $send = WatchApi::change(
-                resId: $tasksId,
-                from: $user['user_uid'],
-                message: "任务状态变为 {$key}",
-            );
-            Log::debug('watch message', [
-                'send-to' => $send,
-            ]);
+
+            try {
+                //发送站内信
+                $send = WatchApi::change(
+                    resId: $tasksId,
+                    from: $user['user_uid'],
+                    message: "任务状态变为 {$key}",
+                );
+                Log::debug('watch message', [
+                    'send-to' => $send,
+                ]);
+                $editor = UserApi::getByUuid($user['user_uid']);
+                foreach ($tasksId as $taskId) {
+                    $discussion->create([
+                        'res_id' => $taskId,
+                        'res_type' => 'task',
+                        'title' => "{$editor['nickName']} 将任务状态变为 {$key}",
+                        'editor_uid' => $user['user_uid'],
+                    ]);
+                }
+            } catch (\Throwable $th) {
+                Log::error($th->getMessage());
+            }
         }
 
         //changed tasks

+ 48 - 0
api-v8/app/Http/Requests/StoreDiscussionRequest.php

@@ -0,0 +1,48 @@
+<?php
+
+namespace App\Http\Requests;
+
+use Illuminate\Foundation\Http\FormRequest;
+use App\Http\Api\AuthApi;
+use Illuminate\Support\Facades\Log;
+
+class StoreDiscussionRequest extends FormRequest
+{
+    private $user;
+    /**
+     * Determine if the user is authorized to make this request.
+     *
+     * @return bool
+     */
+    public function authorize()
+    {
+        $user = AuthApi::current($this);
+        if (!$user) {
+            Log::warning('discussion store auth failed', ['request' => $this]);
+            return false;
+        }
+        $this->user = $user;
+        return true;
+    }
+
+    /**
+     * Get the validation rules that apply to the request.
+     *
+     * @return array
+     */
+    public function rules(): array
+    {
+        return [
+            'res_id' => 'required|string',
+            'res_type' => 'required|string',
+        ];
+    }
+
+    public function messages(): array
+    {
+        return [
+            'res_id.required' => 'res_id是必填项',
+            'res_type.required' => 'res_type是必填项',
+        ];
+    }
+}

+ 1 - 8
api-v8/app/Jobs/ProcessAITranslateJob.php

@@ -45,16 +45,9 @@ class ProcessAITranslateJob extends BaseRabbitMQJob
 
     public static function publish(string $taskId, $aiAssistantId)
     {
-        $us = ['openai.com', 'googleapis.com', 'x.ai', 'anthropic.com'];
         $data = AiTranslateService::makeByTask($taskId, $aiAssistantId);
         $mq = app(RabbitMQService::class);
-        $queue = 'ai_translate_cn';
-        $found = array_filter($us, function ($value) use ($data) {
-            return str_contains($data['model']['url'], $value);
-        });
-        if (count($found) > 0) {
-            $queue = 'ai_translate_us';
-        }
+        $queue = 'ai_translate_v2';
         $mq->publishMessage($queue, $data);
         return count($data['payload']);
     }

+ 22 - 3
api-v8/app/Models/Discussion.php

@@ -9,7 +9,26 @@ class Discussion extends Model
 {
     use HasFactory;
     protected $primaryKey = 'id';
-	protected $casts = [
-		'id' => 'string'
-	];
+    protected $casts = [
+        'id' => 'string'
+    ];
+
+    //批量填充
+    protected $fillable = [
+        'res_id',
+        'res_type',
+        'type',
+        'tpl_id',
+        'title',
+        'content',
+        'content_type',
+        'parent',
+        'editor_uid',
+    ];
+
+    // 设置默认值
+    protected $attributes = [
+        'content_type' => 'markdown',
+        'type' => 'discussion'
+    ];
 }

+ 12 - 1
api-v8/app/Services/AiTranslateService.php

@@ -273,7 +273,8 @@ class AiTranslateService
                 ["role" => "system", "content" => $message->model->system_prompt ?? ''],
                 ["role" => "user", "content" => $message->prompt],
             ],
-            "temperature" => 0.7,
+            "temperature" => 0.3,  // 低随机性,确保准确
+            "top_k" => 20,         // 限制候选词范围(NOTE: top_k 不是标准 OpenAI 参数,需确认目标 API 支持)
             "stream" => false
         ];
         if ($this->openaiProxy) {
@@ -653,10 +654,20 @@ class AiTranslateService
             ];
             array_push($mqData, $aiMqData);
         }
+
         $output = [
             'model' => $aiModel->toArray(),
             'task' => $task,
         ];
+        $us = ['openai.com', 'googleapis.com', 'x.ai', 'anthropic.com'];
+        $found = array_filter($us, function ($value) use ($output) {
+            return str_contains($output['model']['url'], $value);
+        });
+        if ($found) {
+            $output['area'] = 'us';
+        } else {
+            $output['area'] = 'cn';
+        }
         $output['payload'] = $mqData;
         return $output;
     }

+ 33 - 0
api-v8/app/Services/DiscussionService.php

@@ -0,0 +1,33 @@
+<?php
+
+namespace App\Services;
+
+use App\Models\Discussion;
+use Illuminate\Support\Facades\Hash;
+use App\Http\Api\Mq;
+use App\Http\Resources\DiscussionResource;
+
+class DiscussionService
+{
+    public function create(array $data): Discussion
+    {
+        if (isset($data['parent'])) {
+            $parentInfo = Discussion::find($data['parent']);
+            if (!$parentInfo) {
+                throw new \Exception('没有找到parent', 500);
+            }
+            $data['res_id'] = $parentInfo->res_id;
+            $data['res_type'] = $parentInfo->res_type;
+        }
+        $discussion = Discussion::create($data);
+        //更新parent children_count
+        if (isset($data['parent'])) {
+            $parentInfo->increment('children_count', 1);
+            $parentInfo->save();
+        }
+        if (isset($data['notification']) && $data['notification'] == 'true') {
+            Mq::publish('discussion', new DiscussionResource($discussion));
+        }
+        return $discussion;
+    }
+}

+ 1 - 8
api-v8/config/mint.php

@@ -126,14 +126,7 @@ return [
     ],
     'rabbitmq' => [
         'queues' => [
-            'ai_translate_cn' => [
-                'retry_times' => env('RABBITMQ_AI_RETRY_TIMES', 3),
-                'max_loop_count' => env('RABBITMQ_AI_MAX_LOOP', 10),
-                'timeout' => env('RABBITMQ_AI_TIMEOUT', 300),
-                'dead_letter_queue' => 'ai_translate_dlq',
-                'dead_letter_exchange' => 'ai_translate_dlx',
-            ],
-            'ai_translate_us' => [
+            'ai_translate_v2' => [
                 'retry_times' => env('RABBITMQ_AI_RETRY_TIMES', 3),
                 'max_loop_count' => env('RABBITMQ_AI_MAX_LOOP', 10),
                 'timeout' => env('RABBITMQ_AI_TIMEOUT', 300),

BIN
api-v8/public/assets/images/avatar/openai.png


+ 5 - 0
api-v8/routes/api.php

@@ -116,6 +116,7 @@ use App\Http\Controllers\AiAssistantController;
 use App\Http\Controllers\ModelLogController;
 use App\Http\Controllers\SentenceAttachmentController;
 use App\Http\Controllers\EmailCertificationController;
+use App\Http\Controllers\MockOpenAIController;
 
 
 
@@ -290,4 +291,8 @@ Route::group(['prefix' => 'v2'], function () {
     Route::apiResource('sentence-attachment', SentenceAttachmentController::class);
     Route::apiResource('email-certification', EmailCertificationController::class);
     Route::apiResource('sentence-info', SentenceInfoController::class);
+
+    Route::post('mock/openai/chat/completions', [MockOpenAIController::class, 'chatCompletions']);
+    Route::post('mock/openai/completions', [MockOpenAIController::class, 'completions']);
+    Route::get('mock/openai/models', [MockOpenAIController::class, 'models']);
 });

+ 275 - 0
api-v8/tests/Feature/MockOpenAIApiTest.php

@@ -0,0 +1,275 @@
+<?php
+
+namespace Tests\Feature;
+
+use Tests\TestCase;
+
+class MockOpenAIApiTest extends TestCase
+{
+
+    protected string $baseUrl = '/api/v2/mock/openai';
+    protected string $validApiKey = 'Bearer test-api-key-12345';
+    protected string $invalidApiKey = 'Bearer invalid-key';
+
+
+    /**
+     * 测试聊天完成 API - 成功响应
+     */
+    public function test_chat_completions_success_response()
+    {
+        $response = $this->postJson($this->baseUrl . '/chat/completions', [
+            'model' => 'gpt-3.5-turbo',
+            'messages' => [
+                [
+                    'role' => 'user',
+                    'content' => 'Hello, this is a test message'
+                ]
+            ]
+        ], [
+            'Authorization' => $this->validApiKey
+        ]);
+
+        // 由于有随机错误,我们需要处理可能的错误响应
+        if ($response->status() === 200) {
+            $response->assertStatus(200)
+                ->assertJsonStructure([
+                    'id',
+                    'object',
+                    'created',
+                    'model',
+                    'choices' => [
+                        '*' => [
+                            'index',
+                            'message' => [
+                                'role',
+                                'content'
+                            ],
+                            'finish_reason'
+                        ]
+                    ],
+                    'usage' => [
+                        'prompt_tokens',
+                        'completion_tokens',
+                        'total_tokens'
+                    ]
+                ]);
+
+            $responseData = $response->json();
+            $this->assertEquals('chat.completion', $responseData['object']);
+            $this->assertEquals('gpt-3.5-turbo', $responseData['model']);
+            $this->assertEquals('assistant', $responseData['choices'][0]['message']['role']);
+            $this->assertStringContainsString('模拟', $responseData['choices'][0]['message']['content']);
+        } else {
+            // 如果是错误响应,验证错误格式
+            $this->assertContains($response->status(), [400, 429, 500]);
+            $response->assertJsonStructure([
+                'error' => [
+                    'message',
+                    'type'
+                ]
+            ]);
+        }
+    }
+
+    /**
+     * 测试文本完成 API - 成功响应
+     */
+    public function test_completions_success_response()
+    {
+        $response = $this->postJson($this->baseUrl . '/completions', [
+            'model' => 'text-davinci-003',
+            'prompt' => 'Once upon a time'
+        ], [
+            'Authorization' => $this->validApiKey
+        ]);
+
+        if ($response->status() === 200) {
+            $response->assertStatus(200)
+                ->assertJsonStructure([
+                    'id',
+                    'object',
+                    'created',
+                    'model',
+                    'choices' => [
+                        '*' => [
+                            'text',
+                            'index',
+                            'logprobs',
+                            'finish_reason'
+                        ]
+                    ],
+                    'usage'
+                ]);
+
+            $responseData = $response->json();
+            $this->assertEquals('text_completion', $responseData['object']);
+            $this->assertEquals('text-davinci-003', $responseData['model']);
+        } else {
+            $this->assertContains($response->status(), [400, 429, 500]);
+        }
+    }
+
+    /**
+     * 测试模型列表 API
+     */
+    public function test_models_list_response()
+    {
+        $response = $this->getJson($this->baseUrl . '/models', [
+            'Authorization' => $this->validApiKey
+        ]);
+
+        if ($response->status() === 200) {
+            $response->assertStatus(200)
+                ->assertJsonStructure([
+                    'object',
+                    'data' => [
+                        '*' => [
+                            'id',
+                            'object',
+                            'created',
+                            'owned_by'
+                        ]
+                    ]
+                ]);
+
+            $responseData = $response->json();
+            $this->assertEquals('list', $responseData['object']);
+            $this->assertGreaterThan(0, count($responseData['data']));
+
+            // 验证包含预期的模型
+            $modelIds = collect($responseData['data'])->pluck('id')->toArray();
+            $this->assertContains('gpt-4', $modelIds);
+            $this->assertContains('gpt-3.5-turbo', $modelIds);
+        } else {
+            $this->assertContains($response->status(), [400, 429, 500]);
+        }
+    }
+
+    /**
+     * 测试错误响应格式
+     */
+    public function test_error_response_formats()
+    {
+        // 多次请求以增加遇到错误的概率
+        for ($i = 0; $i < 10; $i++) {
+            $response = $this->postJson($this->baseUrl . '/chat/completions', [
+                'model' => 'gpt-3.5-turbo',
+                'messages' => [
+                    ['role' => 'user', 'content' => "Test message $i"]
+                ]
+            ], [
+                'Authorization' => $this->validApiKey
+            ]);
+
+            if (in_array($response->status(), [400, 429, 500])) {
+                $response->assertJsonStructure([
+                    'error' => [
+                        'message',
+                        'type'
+                    ]
+                ]);
+
+                $errorData = $response->json()['error'];
+                $this->assertNotEmpty($errorData['message']);
+                $this->assertNotEmpty($errorData['type']);
+
+                // 验证特定错误类型
+                switch ($response->status()) {
+                    case 400:
+                        $this->assertEquals('invalid_request_error', $errorData['type']);
+                        break;
+                    case 429:
+                        $this->assertEquals('requests', $errorData['type']);
+                        break;
+                    case 500:
+                        $this->assertEquals('server_error', $errorData['type']);
+                        break;
+                }
+
+                // 找到一个错误响应就足够了
+                break;
+            }
+        }
+    }
+
+    /**
+     * 测试响应时间(延迟)
+     */
+    public function test_response_delay()
+    {
+        $startTime = microtime(true);
+
+        $response = $this->postJson($this->baseUrl . '/chat/completions', [
+            'model' => 'gpt-3.5-turbo',
+            'messages' => [
+                ['role' => 'user', 'content' => 'Test delay']
+            ]
+        ], [
+            'Authorization' => $this->validApiKey
+        ]);
+
+        $endTime = microtime(true);
+        $duration = $endTime - $startTime;
+
+        // 验证至少有1秒延迟
+        $this->assertGreaterThanOrEqual(1, $duration);
+
+        // 记录响应时间用于调试
+        echo "\nResponse time: " . number_format($duration, 2) . " seconds\n";
+    }
+
+    /**
+     * 测试请求参数验证
+     */
+    public function test_request_parameters()
+    {
+        // 测试自定义模型参数
+        $response = $this->postJson($this->baseUrl . '/chat/completions', [
+            'model' => 'gpt-4',
+            'messages' => [
+                ['role' => 'user', 'content' => 'Hello with GPT-4']
+            ],
+            'max_tokens' => 100,
+            'temperature' => 0.7
+        ], [
+            'Authorization' => $this->validApiKey
+        ]);
+
+        if ($response->status() === 200) {
+            $responseData = $response->json();
+            $this->assertEquals('gpt-4', $responseData['model']);
+        }
+    }
+
+    /**
+     * 测试并发请求
+     */
+    public function test_concurrent_requests()
+    {
+        $promises = [];
+        $responses = [];
+
+        // 发送5个并发请求
+        for ($i = 0; $i < 5; $i++) {
+            $responses[] = $this->postJson($this->baseUrl . '/chat/completions', [
+                'model' => 'gpt-3.5-turbo',
+                'messages' => [
+                    ['role' => 'user', 'content' => "Concurrent test $i"]
+                ]
+            ], [
+                'Authorization' => $this->validApiKey
+            ]);
+        }
+
+        // 验证所有响应
+        foreach ($responses as $index => $response) {
+            $this->assertContains($response->status(), [200, 400, 429, 500]);
+
+            if ($response->status() === 200) {
+                $response->assertJsonStructure(['id', 'object', 'choices']);
+            } else {
+                $response->assertJsonStructure(['error']);
+            }
+        }
+    }
+}

+ 1 - 1
dashboard-v4/dashboard/.env.orig

@@ -14,4 +14,4 @@ REACT_APP_ASSETS_SERVER=https://assets.wikipali.org
 REACT_APP_API_SERVER=https://www.wikipali.org
 REACT_APP_ICP_CODE=
 REACT_APP_QUESTIONNAIRE_LINK=
-REACT_APP_OPENAI_PROXY=https://jp.wikipali.org/api/openai
+REACT_APP_OPENAI_PROXY=https://staging.ai.jp.wikipali.org/api/openai

+ 4 - 1
dashboard-v4/dashboard/src/components/api/task.ts

@@ -27,7 +27,9 @@ export type TTaskStatus =
   | "canceled"
   | "expired"
   | "queue"
-  | "stop";
+  | "stop"
+  | "quit"
+  | "pause";
 export const StatusButtons: TTaskStatus[] = [
   "pending",
   "published",
@@ -35,6 +37,7 @@ export const StatusButtons: TTaskStatus[] = [
   "done",
   "restarted",
   "requested_restart",
+  "quit",
 ];
 export type TTaskType = "instance" | "workflow" | "group";
 

+ 263 - 325
dashboard-v4/dashboard/src/components/chat/AiChat.tsx

@@ -2,40 +2,45 @@ import React, { useState, useRef, useEffect, useCallback } from "react";
 import {
   Input,
   Button,
-  Avatar,
   Dropdown,
-  message,
   Tooltip,
   Space,
-  Spin,
   MenuProps,
   Card,
   Affix,
 } from "antd";
 import {
   SendOutlined,
-  CopyOutlined,
-  EditOutlined,
-  ReloadOutlined,
   DownOutlined,
-  UserOutlined,
-  RobotOutlined,
   PaperClipOutlined,
 } from "@ant-design/icons";
-import Marked from "../general/Marked";
 import { IAiModel, IAiModelListResponse } from "../api/ai";
 import { get } from "../../request";
-import User from "../auth/User";
+import MsgUser from "./MsgUser";
+import MsgAssistant from "./MsgAssistant";
+import MsgTyping from "./MsgTyping";
+import MsgLoading from "./MsgLoading";
+import MsgSystem from "./MsgSystem";
+import MsgError from "./MsgError";
+import PromptButtonGroup from "./PromptButtonGroup";
+import { useAppSelector } from "../../hooks";
+import { currentUser } from "../../reducers/current-user";
 
 const { TextArea } = Input;
 
 // 类型定义
-interface Message {
+export interface MessageVersion {
   id: number;
-  type: "user" | "ai";
   content: string;
+  model: string;
+  role: "system" | "user" | "assistant";
   timestamp: string;
-  model?: string;
+}
+
+export interface Message {
+  id: number;
+  type: "user" | "ai" | "error";
+  versions: MessageVersion[];
 }
 
 interface OpenAIMessage {
@@ -43,11 +48,6 @@ interface OpenAIMessage {
   content: string;
 }
 
-interface AIModel {
-  key: string;
-  label: string;
-}
-
 interface StreamTypeController {
   addToken: (token: string) => void;
   complete: () => void;
@@ -61,11 +61,16 @@ interface OpenAIStreamResponse {
   }>;
 }
 
+const endOfMsg = (msg: Message) => {
+  return msg.versions[msg.versions.length - 1];
+};
+
 interface IWidget {
   initMessage?: string;
   systemPrompt?: string;
   onChat?: () => void;
 }
+
 const AIChatComponent = ({
   initMessage,
   systemPrompt = "你是一个巴利语专家",
@@ -75,11 +80,19 @@ const AIChatComponent = ({
   const [inputValue, setInputValue] = useState<string>("");
   const [isLoading, setIsLoading] = useState<boolean>(false);
   const [selectedModel, setSelectedModel] = useState<string>("");
+  const [fetchModel, setFetchModel] = useState<string>("");
+  const [refreshingMessageId, setRefreshingMessageId] = useState<number | null>(
+    null
+  );
 
   const messagesEndRef = useRef<HTMLDivElement>(null);
   const [isTyping, setIsTyping] = useState<boolean>(false);
   const [currentTypingMessage, setCurrentTypingMessage] = useState<string>("");
-  const [models, setModels] = useState<IAiModel[]>(); // 可用的AI模型
+  const [models, setModels] = useState<IAiModel[]>();
+
+  const [error, setError] = useState<string>();
+
+  const user = useAppSelector(currentUser);
 
   const scrollToBottom = useCallback(() => {
     messagesEndRef.current?.scrollIntoView({
@@ -109,34 +122,9 @@ const AIChatComponent = ({
     if (initMessage) {
       setMessages([]);
       setInputValue(initMessage);
-      sendMessage();
     }
   }, [initMessage]);
-  // 打字机效果 - 支持流式输入
-  const typeWriter = useCallback(
-    (text: string, callback: () => void): NodeJS.Timeout => {
-      setIsTyping(true);
-      setCurrentTypingMessage("");
-      let index = 0;
-
-      const timer = setInterval(() => {
-        if (index < text.length) {
-          setCurrentTypingMessage((prev) => prev + text.charAt(index));
-          index++;
-        } else {
-          clearInterval(timer);
-          setIsTyping(false);
-          setCurrentTypingMessage("");
-          callback();
-        }
-      }, 30);
 
-      return timer;
-    },
-    []
-  );
-
-  // 流式打字机效果
   const streamTypeWriter = useCallback(
     (
       onToken?: (content: string) => void,
@@ -167,41 +155,48 @@ const AIChatComponent = ({
     []
   );
 
-  // 调用OpenAI API - 支持流式输出
   const callOpenAI = useCallback(
-    async (messages: OpenAIMessage[]): Promise<void> => {
-      setIsLoading(false); // 开始流式输出时取消loading状态
+    async (
+      messages: OpenAIMessage[],
+      modelId: string,
+      isRegenerate: boolean = false,
+      messageIndex?: number
+    ): Promise<{ success: boolean; content?: string; error?: string }> => {
+      setError(undefined);
       if (typeof process.env.REACT_APP_OPENAI_PROXY === "undefined") {
         console.error("no REACT_APP_OPENAI_PROXY");
-        return;
+        return { success: false, error: "API配置错误" };
       }
+
       try {
+        setFetchModel(modelId);
         const payload = {
-          model: models?.find((value) => value.uid === selectedModel)?.model,
+          model: models?.find((value) => value.uid === modelId)?.model,
           messages: messages,
           stream: true,
           temperature: 0.7,
-          max_tokens: 2000,
+          max_tokens: 3000, //本次回复”最大输出长度
         };
         const url = process.env.REACT_APP_OPENAI_PROXY;
-        console.info("api request", url, payload);
+        const data = {
+          model_id: modelId,
+          payload: payload,
+        };
+        console.info("api request", url, data);
+        setIsLoading(true);
         const response = await fetch(url, {
           method: "POST",
           headers: {
             "Content-Type": "application/json",
-            Authorization: `Bearer AIzaSyCzr8KqEdaQ3cRCxsFwSHh8c7kF3RZTZWw`, // 或你的API密钥
+            Authorization: `Bearer AIzaSyCzr8KqEdaQ3cRCxsFwSHh8c7kF3RZTZWw`,
           },
-          body: JSON.stringify({
-            model_id: selectedModel,
-            payload: payload,
-          }),
+          body: JSON.stringify(data),
         });
 
         if (!response.ok) {
           throw new Error(`HTTP error! status: ${response.status}`);
         }
 
-        // 处理流式响应
         const reader = response.body?.getReader();
         if (!reader) {
           throw new Error("无法获取响应流");
@@ -210,21 +205,39 @@ const AIChatComponent = ({
         const decoder = new TextDecoder();
         let buffer = "";
 
-        // 创建流式打字机效果
         const typeController = streamTypeWriter(
-          (content: string) => {
-            // 每次添加token时的回调
-          },
+          (content: string) => {},
           (finalContent: string) => {
-            // 完成时的回调
-            const aiMessage: Message = {
+            console.log("newData in callOpenAI", finalContent);
+            const newData: MessageVersion = {
               id: Date.now(),
-              type: "ai",
               content: finalContent,
+              model: modelId,
+              role: "assistant",
               timestamp: new Date().toLocaleTimeString(),
-              model: selectedModel,
             };
-            setMessages((prev) => [...prev, aiMessage]);
+            if (isRegenerate && messageIndex !== undefined) {
+              setMessages((prev) => {
+                const newMessages = [...prev];
+                const targetMessage = newMessages[messageIndex];
+                if (targetMessage) {
+                  if (!targetMessage.versions) {
+                    targetMessage.versions = [];
+                  }
+                  targetMessage.versions.push(newData);
+                }
+                setRefreshingMessageId(null);
+                return newMessages;
+              });
+            } else {
+              const aiMessage: Message = {
+                id: Date.now(),
+                type: "ai",
+                versions: [newData],
+              };
+              setMessages((prev) => [...prev, aiMessage]);
+              setRefreshingMessageId(null);
+            }
           }
         );
 
@@ -234,7 +247,7 @@ const AIChatComponent = ({
 
             if (done) {
               typeController.complete();
-              break;
+              return { success: true, content: currentTypingMessage };
             }
 
             buffer += decoder.decode(value, { stream: true });
@@ -248,7 +261,7 @@ const AIChatComponent = ({
 
                 if (data === "[DONE]") {
                   typeController.complete();
-                  return;
+                  return { success: true, content: currentTypingMessage };
                 }
 
                 try {
@@ -267,149 +280,161 @@ const AIChatComponent = ({
         } catch (error) {
           console.error("读取流数据失败:", error);
           typeController.complete();
-          throw error;
+          return { success: false, error: "读取响应流失败" };
         }
       } catch (error) {
         console.error("API调用失败:", error);
-
-        // 如果真实API失败,回退到模拟响应
-        const mockResponse = await simulateAIResponse(messages);
-        typeWriter(mockResponse, () => {
-          const aiMessage: Message = {
-            id: Date.now(),
-            type: "ai",
-            content: mockResponse,
-            timestamp: new Date().toLocaleTimeString(),
-            model: selectedModel,
-          };
-          setMessages((prev) => [...prev, aiMessage]);
-        });
+        return { success: false, error: "API调用失败,请重试" };
       }
     },
-    [selectedModel, streamTypeWriter, typeWriter]
-  );
-
-  // 模拟AI响应(作为备用方案)
-  const simulateAIResponse = useCallback(
-    async (conversationHistory: OpenAIMessage[]): Promise<string> => {
-      return new Promise((resolve) => {
-        setTimeout(() => {
-          const lastUserMessage =
-            conversationHistory[conversationHistory.length - 1]?.content || "";
-          const responses = [
-            '这是一个很好的问题。让我来为你详细解答这个关于 "' +
-              lastUserMessage +
-              '" 的问题。\n\n首先,我需要说明的是,这个话题涉及多个方面的考虑。从技术层面来看,我们需要考虑实现的可行性和复杂度。从用户体验的角度,我们要确保解决方案既实用又易于理解。\n\n希望这个回答对你有帮助!',
-            '我理解你的意思。根据我的知识,关于 "' +
-              lastUserMessage +
-              '" 这个问题,我可以从以下几个角度来分析:\n\n1. 首先是基本概念的理解\n2. 然后是实际应用场景\n3. 最后是注意事项和建议\n\n这样的分析方法能够帮助我们更全面地理解这个问题。',
-            '感谢你的提问。关于 "' +
-              lastUserMessage +
-              '" 这个话题,我的看法是这样的:\n\n这确实是一个值得深入探讨的问题。在我看来,解决这类问题的关键在于找到平衡点,既要考虑效率,也要考虑可维护性。\n\n让我知道如果你需要更详细的解释!',
-          ];
-          resolve(responses[Math.floor(Math.random() * responses.length)]);
-        }, 1000);
-      });
-    },
-    []
+    [models, streamTypeWriter, currentTypingMessage]
   );
 
-  // 发送消息到AI
   const sendMessage = useCallback(
     async (messageText: string = inputValue): Promise<void> => {
       if (!messageText.trim()) return;
 
-      const userMessage: Message = {
+      const newData: MessageVersion = {
         id: Date.now(),
-        type: "user",
         content: messageText,
+        model: "",
+        role: "user",
         timestamp: new Date().toLocaleTimeString(),
       };
+      const userMessage: Message = {
+        id: Date.now(),
+        type: "user",
+        versions: [newData],
+      };
 
       setMessages((prev) => [...prev, userMessage]);
       setInputValue("");
       setIsLoading(true);
-      onChat && onChat();
+
+      // Scroll to the new user message
+      scrollToBottom();
+
       try {
-        // 构建对话历史
         const conversationHistory: OpenAIMessage[] = [
           { role: "system", content: systemPrompt },
           ...messages.map((msg) => {
-            const newMsg: OpenAIMessage = {
+            const data: OpenAIMessage = {
               role: msg.type === "user" ? "user" : "assistant",
-              content: msg.content,
+              content: msg.versions[msg.versions.length - 1].content,
             };
-            return newMsg;
+            return data;
           }),
           { role: "user", content: messageText },
         ];
 
-        // 调用OpenAI API
-        await callOpenAI(conversationHistory);
+        const result = await callOpenAI(conversationHistory, selectedModel);
+        setIsLoading(false);
+        if (!result.success) {
+          setError("请求失败,请重试");
+        }
       } catch (error) {
         console.error("发送消息失败:", error);
-        message.error("发送消息失败,请重试");
+        setError("请求失败,请重试");
         setIsLoading(false);
-        setIsTyping(false);
       }
     },
-    [inputValue, messages, systemPrompt, callOpenAI]
+    [
+      inputValue,
+      scrollToBottom,
+      systemPrompt,
+      messages,
+      callOpenAI,
+      selectedModel,
+    ]
   );
 
-  // 复制消息内容
-  const copyMessage = useCallback(async (content: string): Promise<void> => {
-    try {
-      await navigator.clipboard.writeText(content);
-      message.success("已复制到剪贴板");
-    } catch (error) {
-      console.error("复制失败:", error);
-      message.error("复制失败");
-    }
-  }, []);
-
-  // 刷新AI回答
   const refreshAIResponse = useCallback(
-    async (messageIndex: number): Promise<void> => {
+    async (messageIndex: number, modelId: string): Promise<void> => {
+      console.debug("refresh", messageIndex);
       const userMessage = messages[messageIndex - 1];
       if (userMessage && userMessage.type === "user") {
-        // 重新构建到该消息为止的对话历史
+        setRefreshingMessageId(messages[messageIndex].id);
         const conversationHistory: OpenAIMessage[] = [
           { role: "system", content: systemPrompt },
           ...messages.slice(0, messageIndex - 1).map((msg) => {
-            const newMsg: OpenAIMessage = {
+            const data: OpenAIMessage = {
               role: msg.type === "user" ? "user" : "assistant",
-              content: msg.content,
+              content: endOfMsg(msg).content,
             };
-            return newMsg;
+            return data;
           }),
-          { role: "user", content: userMessage.content },
+          { role: "user", content: endOfMsg(userMessage).content },
         ];
 
-        // 移除旧的AI回答
-        setMessages((prev) => prev.slice(0, messageIndex));
-
         try {
-          await callOpenAI(conversationHistory);
+          const result = await callOpenAI(
+            conversationHistory,
+            modelId,
+            true,
+            messageIndex
+          );
+          setIsLoading(false);
+          if (!result.success) {
+            setError("重新生成失败,请重试");
+            setRefreshingMessageId(null);
+          } else {
+            /*
+            console.log("newData refreshAIResponse", result);
+            setMessages((prev) => {
+              const newMessages = [...prev];
+              const targetMessage = newMessages[messageIndex];
+              if (targetMessage) {
+                const newData: MessageVersion = {
+                  id: Date.now(),
+                  content: result.content || "",
+                  model: modelId,
+                  role: "assistant",
+                  timestamp: new Date().toLocaleTimeString(),
+                };
+                targetMessage.type = "ai"; // Update type to "ai"
+                if (!targetMessage.versions) {
+                  targetMessage.versions = [];
+                }
+                targetMessage.versions.push(newData);
+              }
+              setRefreshingMessageId(null);
+              return newMessages;
+            });
+            */
+          }
         } catch (error) {
           console.error("刷新回答失败:", error);
-          message.error("刷新回答失败,请重试");
+          setIsLoading(false);
+          setError("请求失败,请重试");
+          setRefreshingMessageId(null);
         }
       }
     },
     [messages, systemPrompt, callOpenAI]
   );
 
-  // 编辑用户消息
-  const editUserMessage = useCallback(
-    (messageIndex: number, newContent: string): void => {
-      const updatedMessages = [...messages];
-      updatedMessages[messageIndex].content = newContent;
-      setMessages(updatedMessages);
-    },
-    [messages]
-  );
+  const confirmEdit = useCallback((id: number, text: string): void => {
+    setMessages((prev) => {
+      const newMessages = [...prev];
+      const messageIndex = newMessages.findIndex((m) => m.id === id);
+      if (messageIndex !== -1) {
+        const message = newMessages[messageIndex];
+        if (!message.versions) {
+          message.versions = [];
+        }
+        const newData: MessageVersion = {
+          id: Date.now(),
+          content: text,
+          model: "",
+          role: "user",
+          timestamp: new Date().toLocaleTimeString(),
+        };
+        message.versions.push(newData);
+      }
+      return newMessages;
+    });
+  }, []);
 
-  // 处理键盘事件
   const handleKeyPress = useCallback(
     (e: React.KeyboardEvent<HTMLTextAreaElement>): void => {
       if (e.key === "Enter" && !e.shiftKey) {
@@ -420,194 +445,102 @@ const AIChatComponent = ({
     [sendMessage]
   );
 
-  // 模型选择菜单
   const modelMenu: MenuProps = {
     selectedKeys: [selectedModel],
-    onClick: ({ key }) => setSelectedModel(key),
+    onClick: ({ key }) => {
+      console.log("setSelectedModel", key);
+      setSelectedModel(key);
+    },
     items: models?.map((model) => ({
       key: model.uid,
       label: model.name,
     })),
   };
 
-  // 刷新按钮的下拉菜单
-  const refreshMenu = useCallback(
-    (messageIndex: number): MenuProps => ({
-      onClick: ({ key }) => {
-        if (key === "refresh") {
-          refreshAIResponse(messageIndex);
-        }
-      },
-      items: [
-        {
-          key: "refresh",
-          label: "重新生成",
-        },
-        {
-          type: "divider",
-        },
-        {
-          key: "model-submenu",
-          label: "选择模型重新生成",
-          children: models?.map((model) => ({
-            key: model.uid,
-            label: model.name,
-            onClick: () => {
-              setSelectedModel(model.uid);
-              refreshAIResponse(messageIndex);
-            },
-          })),
-        },
-      ],
-    }),
-    [refreshAIResponse, models]
-  );
-
-  return (
-    <div className="flex flex-col h-screen bg-gray-50">
-      {/* 聊天显示窗口 */}
-      <div className="flex-1 overflow-y-auto p-4 space-y-4">
-        {messages.map((msg, index) => (
-          <div
-            key={msg.id}
-            className={`flex ${
-              msg.type === "user" ? "justify-end" : "justify-start"
-            }`}
-          >
-            <div
-              className={`group max-w-[70%] ${
-                msg.type === "user"
-                  ? "bg-blue-500 text-white rounded-l-lg rounded-tr-lg"
-                  : "bg-white border rounded-r-lg rounded-tl-lg shadow-sm"
-              } p-4 relative`}
-            >
-              <div className="flex items-start space-x-3">
-                <Avatar
-                  size={32}
-                  icon={
-                    msg.type === "user" ? <UserOutlined /> : <RobotOutlined />
-                  }
-                  className={
-                    msg.type === "user" ? "bg-blue-600" : "bg-gray-500"
-                  }
-                />
-                <div className="flex-1">
-                  <div className="text-sm font-medium mb-1">
-                    {msg.type === "user"
-                      ? "你"
-                      : msg.model
-                      ? models?.find((m) => m.uid === msg.model)?.name
-                      : "AI助手"}
-                  </div>
-                  <div className="text-sm leading-relaxed whitespace-pre-wrap">
-                    <Marked text={msg.content} />
-                  </div>
-                  <div className="text-xs opacity-60 mt-2">{msg.timestamp}</div>
-                </div>
-              </div>
-
-              {/* 悬浮工具按钮 */}
-              <div
-                className="absolute top-2 right-2 opacity-0 group-hover:opacity-100 transition-opacity"
-                style={{ textAlign: "right" }}
-              >
-                <Space size="small">
-                  <Tooltip title="复制">
-                    <Button
-                      size="small"
-                      type="text"
-                      icon={<CopyOutlined />}
-                      onClick={() => copyMessage(msg.content)}
-                    />
-                  </Tooltip>
-                  {msg.type === "user" ? (
-                    <Tooltip title="编辑">
-                      <Button
-                        size="small"
-                        type="text"
-                        icon={<EditOutlined />}
-                        onClick={() => {
-                          const newContent = prompt("编辑消息:", msg.content);
-                          if (newContent !== null) {
-                            editUserMessage(index, newContent);
-                          }
-                        }}
-                      />
-                    </Tooltip>
-                  ) : (
-                    <Dropdown menu={refreshMenu(index)} trigger={["hover"]}>
-                      <Button
-                        size="small"
-                        type="text"
-                        icon={<ReloadOutlined />}
-                      />
-                    </Dropdown>
-                  )}
-                </Space>
-              </div>
-            </div>
-          </div>
-        ))}
-
-        {/* 显示AI正在输入的消息 */}
-        {isTyping && (
-          <div className="flex justify-start">
-            <div className="max-w-[70%] bg-white border rounded-r-lg rounded-tl-lg shadow-sm p-4">
-              <div className="flex items-start space-x-3">
-                <Avatar
-                  size={32}
-                  icon={<RobotOutlined />}
-                  className="bg-gray-500"
-                />
-                <div className="flex-1">
-                  <div className="text-sm font-medium mb-1">
-                    {models?.find((m) => m.uid === selectedModel)?.name ||
-                      "AI助手"}
-                  </div>
-                  <Marked text={currentTypingMessage} />
-                </div>
-              </div>
-            </div>
-          </div>
-        )}
-
-        {isLoading && !isTyping && (
-          <div className="flex justify-start">
-            <div className="max-w-[70%] bg-white border rounded-r-lg rounded-tl-lg shadow-sm p-4">
-              <div className="flex items-center space-x-3">
-                <Avatar
-                  size={32}
-                  icon={<RobotOutlined />}
-                  className="bg-gray-500"
-                />
-                <Spin size="small" />
-                <span className="text-sm text-gray-500">正在思考...</span>
-              </div>
-            </div>
-          </div>
-        )}
-
+  return user ? (
+    <div
+      style={{
+        display: "flex",
+        flexDirection: "column",
+        width: "100%",
+      }}
+    >
+      <div style={{ flex: 1, overflowY: "auto", padding: "16px" }}>
+        <Space direction="vertical" size="middle" style={{ width: "100%" }}>
+          <MsgSystem value={systemPrompt} />
+          {messages.map((msg, index) => {
+            if (msg.id === refreshingMessageId) {
+              return <></>;
+            } else {
+              if (msg.type === "user") {
+                return (
+                  <MsgUser
+                    key={index}
+                    msg={msg}
+                    onChange={(value: string) => confirmEdit(index, value)}
+                  />
+                );
+              } else if (msg.type === "ai") {
+                return (
+                  <MsgAssistant
+                    key={index}
+                    msg={msg}
+                    models={models}
+                    onRefresh={(modelId: string) => {
+                      refreshAIResponse(index, modelId);
+                    }}
+                  />
+                );
+              } else {
+                return <>unknown</>;
+              }
+            }
+          })}
+          {error ? (
+            <MsgError
+              message={error}
+              onRefresh={() =>
+                refreshAIResponse(messages.length - 1, fetchModel)
+              }
+            />
+          ) : (
+            <></>
+          )}
+          {isTyping && (
+            <MsgTyping
+              text={currentTypingMessage}
+              model={models?.find((m) => m.uid === fetchModel)}
+            />
+          )}
+
+          {isLoading && !isTyping && (
+            <MsgLoading model={models?.find((m) => m.uid === fetchModel)} />
+          )}
+        </Space>
         <div ref={messagesEndRef} />
       </div>
 
-      {/* 用户输入区域 */}
       <Affix offsetBottom={10}>
-        <Card bordered={true} style={{ borderRadius: 10, borderColor: "gray" }}>
-          <div className="max-w-4xl mx-auto">
-            {/* 输入框 */}
-            <div style={{ display: "flex" }}>
+        <Card style={{ borderRadius: "10px", borderColor: "#d9d9d9" }}>
+          <div style={{ maxWidth: "1200px", margin: "0 auto" }}>
+            <div style={{ display: "flex", marginBottom: "8px" }}>
               <TextArea
                 value={inputValue}
                 onChange={(e) => setInputValue(e.target.value)}
                 onKeyPress={handleKeyPress}
                 placeholder="提出你的问题,如:总结下面的内容..."
                 autoSize={{ minRows: 1, maxRows: 6 }}
-                className="resize-none pr-12"
+                style={{ resize: "none", paddingRight: "48px" }}
               />
             </div>
 
-            {/* 功能按钮和模型选择 */}
-            <div style={{ display: "flex", justifyContent: "space-between" }}>
+            <div
+              style={{
+                display: "flex",
+                justifyContent: "space-between",
+                alignItems: "center",
+              }}
+            >
               <Space>
                 <Tooltip title="附加文件">
                   <Button
@@ -616,8 +549,9 @@ const AIChatComponent = ({
                     icon={<PaperClipOutlined />}
                   />
                 </Tooltip>
+                <PromptButtonGroup onText={setInputValue} />
               </Space>
-              <div>
+              <Space>
                 <Dropdown menu={modelMenu} trigger={["click"]}>
                   <Button size="small" type="text">
                     {models?.find((m) => m.uid === selectedModel)?.name}
@@ -627,16 +561,20 @@ const AIChatComponent = ({
                 <Button
                   type="primary"
                   icon={<SendOutlined />}
-                  onClick={() => sendMessage()}
+                  onClick={() => {
+                    sendMessage();
+                    onChat && onChat();
+                  }}
                   disabled={!inputValue.trim() || isLoading}
-                  className="absolute right-2 bottom-2"
                 />
-              </div>
+              </Space>
             </div>
           </div>
         </Card>
       </Affix>
     </div>
+  ) : (
+    <></>
   );
 };
 

+ 152 - 0
dashboard-v4/dashboard/src/components/chat/MsgAssistant.tsx

@@ -0,0 +1,152 @@
+import { Button, Dropdown, message, Space, Tooltip, Typography } from "antd";
+import { Message } from "./AiChat";
+
+import {
+  CopyOutlined,
+  ReloadOutlined,
+  LeftOutlined,
+  RightOutlined,
+} from "@ant-design/icons";
+import { IAiModel } from "../api/ai";
+import { useEffect, useState } from "react";
+import { MenuProps } from "antd/es/menu";
+import Marked from "../general/Marked";
+import MsgContainer from "./MsgContainer";
+
+const { Text } = Typography;
+
+interface IWidget {
+  msg?: Message;
+  models?: IAiModel[];
+  onRefresh?: (modelId: string) => void;
+}
+
+const MsgAssistant = ({ msg, models, onRefresh }: IWidget) => {
+  const [currentVersion, setCurrentVersion] = useState(0);
+
+  useEffect(() => {
+    if (msg) {
+      setCurrentVersion(msg?.versions.length - 1);
+    }
+  }, [msg]);
+
+  const switchMessageVersion = (direction: "prev" | "next"): void => {
+    if (msg && msg.versions) {
+      const maxIndex = msg.versions.length - 1;
+
+      let newIndex = currentVersion;
+      if (direction === "prev" && currentVersion > 0) {
+        newIndex = currentVersion - 1;
+      } else if (direction === "next" && currentVersion < maxIndex) {
+        newIndex = currentVersion + 1;
+      }
+      setCurrentVersion(newIndex);
+    }
+  };
+
+  const refreshMenu: MenuProps = {
+    onClick: ({ key }) => {
+      if (key === "refresh" && msg) {
+        onRefresh && onRefresh(msg.versions[currentVersion].model);
+      }
+    },
+    items: [
+      {
+        key: "refresh",
+        label: "重新生成",
+      },
+      {
+        type: "divider",
+      },
+      {
+        key: "model-submenu",
+        label: "选择模型重新生成",
+        children: models?.map((model, id) => ({
+          key: model.uid,
+          label: model.name,
+          onClick: () => {
+            onRefresh && onRefresh(model.uid);
+          },
+        })),
+      },
+    ],
+  };
+  return (
+    <MsgContainer>
+      <div
+        style={{
+          fontSize: "14px",
+          fontWeight: 500,
+          marginBottom: "4px",
+        }}
+      >
+        {msg?.versions[currentVersion].model
+          ? models?.find((m) => m.uid === msg.versions[currentVersion].model)
+              ?.name
+          : "AI助手"}
+      </div>
+      <div>
+        <Marked text={msg?.versions[currentVersion].content} />
+      </div>
+      <div>
+        <Space>
+          {msg?.versions && msg.versions.length > 1 && (
+            <div style={{ marginBottom: "8px" }}>
+              <Space size="small">
+                <Button
+                  size="small"
+                  type="text"
+                  icon={<LeftOutlined />}
+                  disabled={currentVersion === 0}
+                  onClick={() => switchMessageVersion("prev")}
+                />
+                <Text
+                  style={{
+                    fontSize: "12px",
+                    color:
+                      msg.type === "user" ? "rgba(255,255,255,0.7)" : "#666",
+                  }}
+                >
+                  {(currentVersion || 0) + 1}/{msg.versions.length}
+                </Text>
+                <Button
+                  size="small"
+                  type="text"
+                  icon={<RightOutlined />}
+                  disabled={currentVersion === msg.versions.length - 1}
+                  onClick={() => switchMessageVersion("next")}
+                />
+              </Space>
+            </div>
+          )}
+          <div>
+            <Space size="small">
+              <Tooltip title="复制">
+                <Button
+                  size="small"
+                  type="text"
+                  icon={<CopyOutlined />}
+                  onClick={() => {
+                    msg &&
+                      navigator.clipboard
+                        .writeText(msg.versions[currentVersion].content)
+                        .then((value) => message.success("已复制到剪贴板"))
+                        .catch((reason: any) => {
+                          console.error("复制失败:", reason);
+                          message.error("复制失败");
+                        });
+                  }}
+                />
+              </Tooltip>
+              <Dropdown menu={refreshMenu} trigger={["hover"]}>
+                <Button size="small" type="text" icon={<ReloadOutlined />} />
+              </Dropdown>
+            </Space>
+          </div>
+        </Space>
+      </div>
+    </MsgContainer>
+  );
+};
+
+export default MsgAssistant;

+ 29 - 0
dashboard-v4/dashboard/src/components/chat/MsgContainer.tsx

@@ -0,0 +1,29 @@
+interface IWidget {
+  children?: React.ReactNode;
+}
+const MsgContainer = ({ children }: IWidget) => {
+  return (
+    <div
+      style={{
+        display: "flex",
+        justifyContent: "flex-start",
+      }}
+    >
+      <div
+        style={{
+          maxWidth: "95%",
+          color: "black",
+          borderRadius: "8px",
+          padding: "16px",
+          border: "none",
+          boxShadow: "0 1px 2px rgba(0, 0, 0, 0.03)",
+          textAlign: "left",
+        }}
+      >
+        {children}
+      </div>
+    </div>
+  );
+};
+
+export default MsgContainer;

+ 23 - 0
dashboard-v4/dashboard/src/components/chat/MsgError.tsx

@@ -0,0 +1,23 @@
+import { Alert, Button } from "antd";
+import { ReloadOutlined } from "@ant-design/icons";
+interface IWidget {
+  message?: string;
+  onRefresh?: () => void;
+}
+const MsgError = ({ message, onRefresh }: IWidget) => {
+  return (
+    <Alert
+      type="error"
+      closable={false}
+      showIcon
+      message={message}
+      action={
+        <Button type="text" icon={<ReloadOutlined />} onClick={onRefresh}>
+          刷新
+        </Button>
+      }
+    />
+  );
+};
+
+export default MsgError;

+ 20 - 0
dashboard-v4/dashboard/src/components/chat/MsgLoading.tsx

@@ -0,0 +1,20 @@
+import MsgContainer from "./MsgContainer";
+import { IAiModel } from "../api/ai";
+import User from "../auth/User";
+import { Space } from "antd";
+
+interface IWidget {
+  model?: IAiModel;
+}
+const MsgLoading = ({ model }: IWidget) => {
+  return (
+    <MsgContainer>
+      <Space>
+        <User {...model?.user} />
+        正在思考……
+      </Space>
+    </MsgContainer>
+  );
+};
+
+export default MsgLoading;

+ 42 - 0
dashboard-v4/dashboard/src/components/chat/MsgSystem.tsx

@@ -0,0 +1,42 @@
+import { useState } from "react";
+
+interface IWidget {
+  value?: string;
+}
+const MsgSystem = ({ value }: IWidget) => {
+  const [display, setDisplay] = useState(false);
+  return (
+    <div
+      style={{
+        backgroundColor: "#fafafa",
+        border: "1px dashed #d9d9d9",
+        borderRadius: 4,
+        marginTop: 8,
+        fontSize: 12,
+        color: "#888",
+        padding: 8,
+      }}
+    >
+      <div
+        style={{ cursor: "pointer", userSelect: "none" }}
+        onClick={() => {
+          setDisplay(!display);
+        }}
+      >
+        {display ? "▼ 收起资料" : "▶ 展开资料"}
+      </div>
+      <div style={{ display: display ? "block" : "none" }}>
+        <pre
+          style={{
+            whiteSpace: "pre-wrap",
+            wordBreak: "break-word",
+            marginTop: 4,
+          }}
+        >
+          {value}
+        </pre>
+      </div>
+    </div>
+  );
+};
+export default MsgSystem;

+ 23 - 0
dashboard-v4/dashboard/src/components/chat/MsgTyping.tsx

@@ -0,0 +1,23 @@
+import MsgContainer from "./MsgContainer";
+import { IAiModel } from "../api/ai";
+import Marked from "../general/Marked";
+import User from "../auth/User";
+
+interface IWidget {
+  model?: IAiModel;
+  text?: string;
+}
+const MsgTyping = ({ model, text }: IWidget) => {
+  return (
+    <MsgContainer>
+      <div>
+        <User {...model?.user} />
+      </div>
+      <div>
+        <Marked text={text} />
+      </div>
+    </MsgContainer>
+  );
+};
+
+export default MsgTyping;

+ 149 - 0
dashboard-v4/dashboard/src/components/chat/MsgUser.tsx

@@ -0,0 +1,149 @@
+import { useCallback, useEffect, useState } from "react";
+import { Message } from "./AiChat";
+import Marked from "../general/Marked";
+import TextArea from "antd/lib/input/TextArea";
+import { Button, message, Space, Tooltip } from "antd";
+
+import {
+  CheckOutlined,
+  CloseOutlined,
+  CopyOutlined,
+  EditOutlined,
+} from "@ant-design/icons";
+
+interface IWidget {
+  msg?: Message;
+  onChange?: (value: string) => void;
+}
+
+const MsgUser = ({ msg, onChange }: IWidget) => {
+  const [editing, setEditing] = useState(false);
+  const [current, setCurrent] = useState(0);
+  const [content, setContent] = useState<string>("");
+
+  useEffect(() => {
+    if (msg?.versions && msg?.versions.length > 0) {
+      setContent(msg.versions[current].content);
+    }
+  }, [current, msg]);
+
+  const confirmEdit = useCallback((): void => {
+    onChange && onChange(content);
+  }, [content, onChange]);
+
+  const cancelEdit = useCallback((): void => {
+    setEditing(false);
+  }, []);
+
+  const handleEditKeyPress = useCallback(
+    (e: React.KeyboardEvent<HTMLTextAreaElement>): void => {
+      if (e.key === "Enter" && e.ctrlKey) {
+        e.preventDefault();
+        confirmEdit();
+      } else if (e.key === "Escape") {
+        cancelEdit();
+      }
+    },
+    [cancelEdit, confirmEdit]
+  );
+
+  return (
+    <div
+      style={{
+        display: "flex",
+        justifyContent: "flex-end",
+      }}
+    >
+      <div
+        style={{
+          maxWidth: "70%",
+          minWidth: 400,
+          backgroundColor: "rgba(255, 255, 255, 0.8)",
+          color: "black",
+          borderRadius: "8px",
+          padding: "16px",
+          border: "none",
+          boxShadow: "0 1px 2px rgba(0, 0, 0, 0.03)",
+          textAlign: "left",
+        }}
+      >
+        {editing ? (
+          <div style={{ width: "100%" }}>
+            <TextArea
+              value={content}
+              onChange={(e) => setContent(e.target.value)}
+              onKeyPress={handleEditKeyPress}
+              autoSize={{ minRows: 2, maxRows: 8 }}
+              style={{ marginBottom: "8px", width: "100%" }}
+            />
+            <Space size="small">
+              <Button
+                size="small"
+                type="primary"
+                icon={<CheckOutlined />}
+                onClick={() => confirmEdit()}
+              >
+                确认
+              </Button>
+              <Button
+                size="small"
+                icon={<CloseOutlined />}
+                onClick={cancelEdit}
+              >
+                取消
+              </Button>
+            </Space>
+          </div>
+        ) : (
+          <div>
+            <div>
+              <Marked text={msg?.versions[current].content} />
+            </div>
+            <div
+              style={{
+                fontSize: "12px",
+                opacity: 0.6,
+                marginTop: "8px",
+              }}
+            >
+              {msg?.versions[current].timestamp}
+            </div>
+            <div>
+              <Space size="small">
+                <Tooltip title="复制">
+                  <Button
+                    size="small"
+                    type="text"
+                    icon={<CopyOutlined />}
+                    onClick={() => {
+                      msg &&
+                        navigator.clipboard
+                          .writeText(msg.versions[current].content)
+                          .then((value) => message.success("已复制到剪贴板"))
+                          .catch((reason: any) => {
+                            console.error("复制失败:", reason);
+                            message.error("复制失败");
+                          });
+                    }}
+                  />
+                </Tooltip>
+                <Tooltip title="复制">
+                  <Button
+                    size="small"
+                    type="text"
+                    icon={<EditOutlined />}
+                    onClick={() => {
+                      msg && setEditing(true);
+                    }}
+                  />
+                </Tooltip>
+              </Space>
+            </div>
+          </div>
+        )}
+      </div>
+    </div>
+  );
+};
+
+export default MsgUser;

+ 146 - 0
dashboard-v4/dashboard/src/components/chat/PromptButtonGroup.tsx

@@ -0,0 +1,146 @@
+import { useEffect, useState } from "react";
+import { Button, Dropdown, Space } from "antd";
+import type { MenuProps } from "antd";
+import { IArticleListResponse } from "../api/Article";
+import { get } from "../../request";
+import { useAppSelector } from "../../hooks";
+import { currentUser } from "../../reducers/current-user";
+
+// 接口定义
+export interface IPromptNode {
+  text: string;
+  prompt?: string;
+  children?: IPromptNode[];
+}
+
+// Markdown 解析函数
+export function parseMarkdownToPromptNodes(markdown: string): IPromptNode[] {
+  const lines = markdown
+    .split("\n")
+    .map((l) => l.trim())
+    .filter(Boolean);
+
+  const result: IPromptNode[] = [];
+  let currentButton: IPromptNode | null = null;
+  let currentChild: { title: string; content: string[] } | null = null;
+
+  for (let line of lines) {
+    if (line.startsWith("# ")) {
+      if (currentButton) {
+        if (currentChild) {
+          currentButton.children = currentButton.children || [];
+          currentButton.children.push({
+            text: currentChild.title,
+            prompt: currentChild.content.join("\n"),
+          });
+        }
+        result.push(currentButton);
+      }
+      currentButton = { text: line.replace("# ", ""), children: [] };
+      currentChild = null;
+    } else if (line.startsWith("## ")) {
+      if (currentChild) {
+        currentButton!.children!.push({
+          text: currentChild.title,
+          prompt: currentChild.content.join("\n"),
+        });
+      }
+      currentChild = {
+        title: line.replace("## ", ""),
+        content: [],
+      };
+    } else {
+      currentChild?.content.push(line);
+    }
+  }
+
+  if (currentChild) {
+    currentButton!.children!.push({
+      text: currentChild.title,
+      prompt: currentChild.content.join("\n"),
+    });
+  }
+  if (currentButton) {
+    // 若没有children但有currentChild.prompt,作为主按钮 prompt
+    if (!currentButton.children?.length && currentChild?.content.length) {
+      currentButton.prompt = currentChild.content.join("\n");
+      delete currentButton.children;
+    }
+    result.push(currentButton);
+  }
+
+  return result;
+}
+
+interface IWidget {
+  onText?: (prompt: string) => void;
+}
+// 按钮组组件
+const PromptButtonGroup = ({ onText }: IWidget) => {
+  const user = useAppSelector(currentUser);
+  const [data, setData] = useState<IPromptNode[]>([]);
+
+  useEffect(() => {
+    if (!user) {
+      return;
+    }
+    const getPromptOne = async (studio: string) => {
+      const urlTpl = `/v2/article?view=template&studio_name=${studio}&subtitle=_template_prompt_&content=true`;
+      const json = await get<IArticleListResponse>(urlTpl);
+      if (json.ok) {
+        if (json.data.rows.length > 0) {
+          if (json.data.rows[0].content) {
+            return json.data.rows[0].content;
+          }
+        }
+      }
+      return false;
+    };
+
+    const getPrompt = async () => {
+      const my = await getPromptOne(user.realName);
+      if (my) {
+        setData(parseMarkdownToPromptNodes(my));
+        return;
+      }
+      const system = await getPromptOne("admin");
+      if (system) {
+        setData(parseMarkdownToPromptNodes(system));
+      }
+    };
+    getPrompt().catch((e) => console.error(e));
+  }, [user, user?.realName]);
+
+  return (
+    <Space>
+      {data.map((node) => {
+        if (node.children && node.children.length > 0) {
+          const items: MenuProps["items"] = node.children.map((child, idx) => ({
+            key: `${node.text}-${idx}`,
+            label: child.text,
+            onClick: () => onText && onText(child.prompt || ""),
+          }));
+
+          return (
+            <Dropdown key={node.text} menu={{ items }} trigger={["click"]}>
+              <Button type="link" size="small">
+                {node.text}
+              </Button>
+            </Dropdown>
+          );
+        } else {
+          return (
+            <Button
+              key={node.text}
+              onClick={() => onText && onText(node.prompt || "")}
+            >
+              {node.text}
+            </Button>
+          );
+        }
+      })}
+    </Space>
+  );
+};
+
+export default PromptButtonGroup;

+ 1 - 17
dashboard-v4/dashboard/src/components/task/Description.tsx

@@ -22,17 +22,10 @@ const Description = ({ task, onChange, onDiscussion }: IWidget) => {
   const [mode, setMode] = useState<"read" | "edit">("read");
   const [content, setContent] = useState(task?.description);
   const [loading, setLoading] = useState(false);
-  const [open, setOpen] = useState(false);
 
   useEffect(() => setContent(task?.description), [task]);
   return (
     <div>
-      <DiscussionDrawer
-        open={open}
-        onClose={() => setOpen(false)}
-        resId={task?.id}
-        resType="task"
-      />
       <div
         style={{
           display: "flex",
@@ -46,16 +39,7 @@ const Description = ({ task, onChange, onDiscussion }: IWidget) => {
         <span>
           {mode === "read" ? (
             <Space>
-              <Button
-                key={1}
-                onClick={() => {
-                  if (typeof onDiscussion === "undefined") {
-                    setOpen(true);
-                  } else {
-                    onDiscussion();
-                  }
-                }}
-              >
+              <Button key={1} onClick={onDiscussion}>
                 {intl.formatMessage({ id: "buttons.discussion" })}
               </Button>
               <Button

+ 59 - 40
dashboard-v4/dashboard/src/components/task/TaskBuilderChapter.tsx

@@ -2,7 +2,6 @@ import {
   Button,
   Divider,
   Input,
-  message,
   Modal,
   notification,
   Space,
@@ -10,7 +9,7 @@ import {
   Typography,
 } from "antd";
 
-import { useMemo, useState } from "react";
+import { useState } from "react";
 import Workflow from "./Workflow";
 import {
   IProjectTreeData,
@@ -34,12 +33,9 @@ import {
 } from "../api/token";
 import ProjectWithTasks from "./ProjectWithTasks";
 import { useIntl } from "react-intl";
-import { NotificationPlacement } from "antd/lib/notification";
 import React from "react";
 const { Text, Paragraph } = Typography;
 
-const Context = React.createContext({ name: "Default" });
-
 interface IModal {
   studioName?: string;
   channels?: string[];
@@ -166,7 +162,7 @@ const TaskBuilderChapter = ({
             workflow={workflow}
             channelsId={channels}
             onChange={(data: IProp[] | undefined) => {
-              console.info("prop value", data);
+              console.info("TaskBuilderProp prop value", data);
               setProp(data);
               let channels = new Map<string, number>();
               data?.forEach((value) => {
@@ -200,7 +196,7 @@ const TaskBuilderChapter = ({
                 console.info("api request", url, values);
                 post<ITokenCreate, ITokenCreateResponse>(url, values).then(
                   (json) => {
-                    console.info("api response", json);
+                    console.info("api response token", json);
                     setTokens(json.data.rows);
                   }
                 );
@@ -275,6 +271,48 @@ const TaskBuilderChapter = ({
     });
   };
 
+  //生成任务组
+  const projectGroup = async () => {
+    if (!studioName || !chapter) {
+      console.error("缺少参数", studioName, chapter);
+      return;
+    }
+    const url = "/v2/project-tree";
+    const values: IProjectTreeInsertRequest = {
+      studio_name: studioName,
+      data: chapter.map((item, id) => {
+        return {
+          id: item.paragraph.toString(),
+          title: id === 0 && title ? title : item.text ?? "",
+          type: "instance",
+          weight: item.chapter_strlen,
+          parent_id: item.parent.toString(),
+          res_id: `${item.book}-${item.paragraph}`,
+        };
+      }),
+    };
+    let res;
+    try {
+      console.info("api request", url, values);
+      res = await post<IProjectTreeInsertRequest, IProjectTreeResponse>(
+        url,
+        values
+      );
+      console.info("api response", res);
+      // 检查响应状态
+      if (!res.ok) {
+        throw new Error("HTTP error: project-tree insert failed");
+      }
+      setProjects(res.data.rows);
+      setMessages((origin) => [...origin, "生成任务组成功"]);
+    } catch (error) {
+      console.error("Fetch error:", error);
+      openNotification("error", "生成任务组失败");
+      throw error;
+    }
+    return res.data.rows;
+  };
+
   const DoButton = () => {
     return (
       <>
@@ -290,48 +328,20 @@ const TaskBuilderChapter = ({
             setLoading(true);
             //生成projects
             setMessages((origin) => [...origin, "正在生成任务组……"]);
-            const url = "/v2/project-tree";
-            const values: IProjectTreeInsertRequest = {
-              studio_name: studioName,
-              data: chapter.map((item, id) => {
-                return {
-                  id: item.paragraph.toString(),
-                  title: id === 0 && title ? title : item.text ?? "",
-                  type: "instance",
-                  weight: item.chapter_strlen,
-                  parent_id: item.parent.toString(),
-                  res_id: `${item.book}-${item.paragraph}`,
-                };
-              }),
-            };
-            let res;
-            try {
-              console.info("api request", url, values);
-              res = await post<IProjectTreeInsertRequest, IProjectTreeResponse>(
-                url,
-                values
-              );
-              console.info("api response", res);
-              // 检查响应状态
-              if (!res.ok) {
-                throw new Error(`HTTP error! status: `);
-              }
-              setProjects(res.data.rows);
-              setMessages((origin) => [...origin, "生成任务组成功"]);
-            } catch (error) {
-              console.error("Fetch error:", error);
-              openNotification("error", "生成任务组失败");
-              throw error;
+            const res = await projectGroup();
+            if (!res) {
+              return;
             }
 
             //生成tasks
             setMessages((origin) => [...origin, "正在生成任务……"]);
+
             const taskUrl = "/v2/task-group";
             if (!workflow) {
               return;
             }
 
-            let taskData: ITaskGroupInsertData[] = res.data.rows
+            let taskData: ITaskGroupInsertData[] = res
               .filter((value) => value.isLeaf)
               .map((project, pId) => {
                 return {
@@ -377,6 +387,15 @@ const TaskBuilderChapter = ({
                                   ? token.payload.power === power
                                   : true)
                             );
+                            if (!mToken) {
+                              console.warn(
+                                "token not found",
+                                book,
+                                paragraph,
+                                channel,
+                                power
+                              );
+                            }
                             newContent = newContent?.replace(
                               value.key,
                               channel + (mToken ? "@" + mToken?.token : "")

+ 83 - 0
dashboard-v4/dashboard/src/components/task/TaskLog.tsx

@@ -0,0 +1,83 @@
+import { Button, Skeleton, Timeline } from "antd";
+import React, { useEffect, useState } from "react";
+import { get } from "../../request";
+import { ICommentApiData, ICommentListResponse } from "../api/Comment";
+import TimeShow from "../general/TimeShow";
+import { StatusButtons, TTaskStatus } from "../api/task";
+import { TaskStatusColor } from "./TaskStatus";
+import User from "../auth/User";
+
+interface IWidget {
+  taskId?: string;
+  onMore?: () => void;
+}
+const TaskLog = ({ taskId, onMore }: IWidget) => {
+  const [data, setData] = useState<ICommentApiData[]>();
+  const [total, setTotal] = useState<number>(0);
+  const [loading, setLoading] = useState(false);
+  useEffect(() => {
+    const url: string = `/v2/discussion?type=discussion&res_type=task&view=question&id=${taskId}&limit=5&offset=0&status=active`;
+    console.info("api request", url);
+    setLoading(true);
+    get<ICommentListResponse>(url)
+      .then((json) => {
+        if (json.ok) {
+          console.debug("discussion api response", json);
+          setData(json.data.rows);
+          setTotal(json.data.count);
+        }
+      })
+      .finally(() => setLoading(false));
+  }, [taskId]);
+
+  function findKeywordInTitle(title?: string): string | undefined {
+    if (!title) {
+      return undefined;
+    }
+    const keywords = StatusButtons;
+
+    for (const keyword of keywords) {
+      if (title.includes(keyword)) {
+        return keyword;
+      }
+    }
+
+    return undefined;
+  }
+
+  return (
+    <>
+      <Timeline>
+        {loading && <Skeleton paragraph={{ rows: 1 }} active avatar />}
+        {data?.map((item, id) => {
+          const status = findKeywordInTitle(item.title);
+          return (
+            <Timeline.Item
+              key={id}
+              color={TaskStatusColor(status as TTaskStatus)}
+              dot={<User {...item.editor} showName={false} />}
+            >
+              <div>
+                <TimeShow
+                  showLabel={false}
+                  showIcon={false}
+                  createdAt={item.created_at}
+                />
+              </div>
+              <div>{item.title}</div>
+            </Timeline.Item>
+          );
+        })}
+        {total > 5 && (
+          <Timeline.Item>
+            <Button type="link" onClick={onMore}>
+              更多
+            </Button>
+          </Timeline.Item>
+        )}
+      </Timeline>
+    </>
+  );
+};
+
+export default TaskLog;

+ 12 - 1
dashboard-v4/dashboard/src/components/task/TaskReader.tsx

@@ -17,6 +17,8 @@ import TaskStatus from "./TaskStatus";
 import Description from "./Description";
 import Category from "./Category";
 import { useIntl } from "react-intl";
+import TaskLog from "./TaskLog";
+import DiscussionDrawer from "../discussion/DiscussionDrawer";
 
 const { Text } = Typography;
 
@@ -40,6 +42,8 @@ const TaskReader = ({ taskId, onChange, onDiscussion }: IWidget) => {
   const [openNextTask, setOpenNextTask] = useState(false);
   const [task, setTask] = useState<ITaskData>();
   const [loading, setLoading] = useState(true);
+  const [open, setOpen] = useState(false);
+
   useEffect(() => {
     const url = `/v2/task/${taskId}`;
     console.info("task api request", url);
@@ -191,13 +195,20 @@ const TaskReader = ({ taskId, onChange, onDiscussion }: IWidget) => {
         </Space>
       </div>
       <Divider />
+      <TaskLog taskId={taskId} onMore={() => setOpen(true)} />
       <Description
         task={task}
         onChange={(data) => {
           setTask(data[0]);
           onChange && onChange(data);
         }}
-        onDiscussion={onDiscussion}
+        onDiscussion={() => setOpen(true)}
+      />
+      <DiscussionDrawer
+        open={open}
+        onClose={() => setOpen(false)}
+        resId={taskId}
+        resType="task"
       />
     </div>
   );

+ 21 - 26
dashboard-v4/dashboard/src/components/task/TaskStatus.tsx

@@ -1,9 +1,28 @@
 import { Progress, Tag, Tooltip } from "antd";
-import { ITaskData, ITaskResponse } from "../api/task";
+import { ITaskData, ITaskResponse, TTaskStatus } from "../api/task";
 import { useIntl } from "react-intl";
 import { useEffect, useState } from "react";
 import { get } from "../../request";
 
+const taskStatusColors: Record<TTaskStatus, string> = {
+  pending: "default",
+  published: "orange",
+  running: "processing",
+  done: "success",
+  restarted: "warning",
+  requested_restart: "warning",
+  closed: "error",
+  canceled: "error",
+  expired: "error",
+  queue: "default",
+  stop: "error",
+  quit: "error",
+  pause: "warning",
+};
+export const TaskStatusColor = (status: TTaskStatus = "pending"): string => {
+  return taskStatusColors[status];
+};
+
 interface IWidget {
   task?: ITaskData;
 }
@@ -35,31 +54,7 @@ const TaskStatus = ({ task }: IWidget) => {
     };
   }, [task]);
 
-  let color = "";
-  switch (task?.status) {
-    case "pending":
-      color = "default";
-      break;
-    case "published":
-      color = "orange";
-      break;
-    case "running":
-      color = "processing";
-      break;
-    case "done":
-      color = "success";
-      break;
-    case "restarted":
-      color = "error";
-      break;
-    case "requested_restart":
-      color = "warning";
-      break;
-    case "stop":
-      color = "error";
-      break;
-  }
-
+  const color = TaskStatusColor(task?.status);
   return (
     <>
       <Tag color={color}>

+ 7 - 0
dashboard-v4/dashboard/src/components/task/TaskStatusButton.tsx

@@ -90,6 +90,7 @@ const TaskStatusButton = ({
       menuEnable = [
         "done",
         "stop",
+        "quit",
         requested_restart_enable ? "requested_restart" : "done",
       ];
       break;
@@ -108,6 +109,12 @@ const TaskStatusButton = ({
     case "stop":
       menuEnable = ["restarted"];
       break;
+    case "quit":
+      menuEnable = ["published"];
+      break;
+    case "pause":
+      menuEnable = ["restarted"];
+      break;
   }
 
   const items: IStatusMenu[] = StatusButtons.map((item) => {

+ 1 - 0
dashboard-v4/dashboard/src/components/template/SentEdit/SentEditMenu.tsx

@@ -162,6 +162,7 @@ const SentEditMenuWidget = ({
 
   return (
     <div
+      style={{ position: "relative" }}
       onMouseEnter={() => {
         setIsHover(true);
       }}

+ 5 - 4
dashboard-v4/dashboard/src/locales/en-US/buttons.ts

@@ -95,11 +95,12 @@ const items = {
   "buttons.manage": "Manage",
   "buttons.delete.wbw.sentence": "Delete Wbw",
   "buttons.ai.translate": "AI Translate",
-  "buttons.task.status.change.to.published": "发布",
+  "buttons.task.status.change.to.published": "publish",
   "buttons.task.status.change.to.running": "领取",
-  "buttons.task.status.change.to.done": "完成任务",
-  "buttons.task.status.change.to.restarted": "重做",
-  "buttons.task.status.change.to.requested_restart": "请求重做",
+  "buttons.task.status.change.to.done": "done",
+  "buttons.task.status.change.to.restarted": "restart",
+  "buttons.task.status.change.to.requested_restart": "request restart",
+  "buttons.task.status.change.to.quit": "quit",
   "buttons.access-token.get": "access token",
   "buttons.task.add.pre-task": "pre task",
   "buttons.task.add.next-task": "next task",

+ 2 - 0
dashboard-v4/dashboard/src/locales/en-US/label.ts

@@ -69,6 +69,8 @@ const items = {
   "labels.task.status.expired": "expired",
   "labels.task.status.queue": "queue",
   "labels.task.status.stop": "stop",
+  "labels.task.status.quit": "quit",
+  "labels.task.status.pause": "pause",
   "labels.filter": "filter",
   "labels.participants": "participants",
   "labels.task.category": "task category",

+ 1 - 0
dashboard-v4/dashboard/src/locales/zh-Hans/buttons.ts

@@ -101,6 +101,7 @@ const items = {
   "buttons.task.status.change.to.done": "完成任务",
   "buttons.task.status.change.to.restarted": "重做",
   "buttons.task.status.change.to.requested_restart": "请求重做",
+  "buttons.task.status.change.to.quit": "放弃任务",
   "buttons.access-token.get": "获取访问口令",
   "buttons.task.add.pre-task": "添加前置任务",
   "buttons.task.add.next-task": "添加后置任务",

+ 2 - 0
dashboard-v4/dashboard/src/locales/zh-Hans/label.ts

@@ -77,6 +77,8 @@ const items = {
   "labels.task.status.expired": "已过期",
   "labels.task.status.queue": "排队中",
   "labels.task.status.stop": "停止",
+  "labels.task.status.quit": "放弃",
+  "labels.task.status.pause": "暂停",
   "labels.filter": "过滤器",
   "labels.participants": "参与者",
   "labels.task.category": "任务类型",

+ 4 - 25
dashboard-v4/dashboard/src/pages/library/search/search.tsx

@@ -25,7 +25,6 @@ const Widget = () => {
   const [pageType, setPageType] = useState("P");
   const [view, setView] = useState<ISearchView | undefined>("pali");
   const [caseWord, setCaseWord] = useState<string[]>();
-  const [prompt, setPrompt] = useState<string>();
   const [sysPrompt, setSysPrompt] = useState<string>();
 
   const [ftsData, setFtsData] = useState<IFtsItem[]>();
@@ -117,7 +116,9 @@ const Widget = () => {
                 return `## ${item.title}-${item.paliTitle} \n\n${item.content}\n\n`;
               })
               .join("");
-            setSysPrompt(`${chat}\n\n请根据上述巴利文本内容,回答用户的问题`);
+            setSysPrompt(
+              `# 搜索词:${key}\n\n# 搜索结果:\n\n${chat}\n\n请根据上述巴利文本内容,回答用户的问题。并猜测用户可能提问的下一个问题。列在每次回答的结尾处。可能的问题包括但是不限于:1. 生成一个概要的分类 2. 生成百科词条 范例:\n\n**下一个问题**\n\n 1. 问题1`
+            );
           }
         } else {
           console.error(json.message);
@@ -273,32 +274,10 @@ const Widget = () => {
                   ]}
                 />
                 <AIChatComponent
-                  initMessage={prompt}
                   systemPrompt={sysPrompt}
                   onChat={() => setChat(true)}
                 />
-                <div>
-                  <Space>
-                    <Button
-                      onClick={() =>
-                        setPrompt(
-                          `写一个关于**${key}**的概要,概要中的观点应该引用上述巴利文经文,并逐条列出每个巴利原文每个段落的摘要`
-                        )
-                      }
-                    >
-                      概要
-                    </Button>
-                    <Button
-                      onClick={() =>
-                        setPrompt(
-                          `写一个介绍**${key}**的百科词条,词条中的观点应该引用巴利文经文,并给出引用的巴利原文和译文`
-                        )
-                      }
-                    >
-                      术语
-                    </Button>
-                  </Space>
-                </div>
+
                 {chat ? (
                   <></>
                 ) : (

+ 16 - 4
deploy/mint.yml

@@ -10,6 +10,7 @@
     - task
     - fort
     - ai_translate
+    - openai_proxy
   roles:
     - mint-v2.1
 
@@ -68,15 +69,26 @@
         state: restarted
         scope: user
 
-- name: Start mint php-fpm
+- name: Start mint ai-translate worker
   hosts:
     - ai_translate
   tasks:
-    - name: Disable php ai-translate service
+    - name: Enable php ai-translate service
       ansible.builtin.systemd_service:
         name: "{{ app_container_prefix }}-{{ app_domain }}-worker-mq-ai.translate"
-        enabled: false
-        state: stopped
+        enabled: true
+        state: started
+        scope: user
+
+- name: Start mint openai-proxy worker
+  hosts:
+    - openai_proxy
+  tasks:
+    - name: Enable openai-proxy service
+      ansible.builtin.systemd_service:
+        name: "{{ app_container_prefix }}-{{ app_domain }}-worker-mq-openai.proxy"
+        enabled: true
+        state: started
         scope: user
 
 - name: Setup nginx

+ 35 - 0
deploy/roles/mint-v2.1/tasks/ai-translate.yml

@@ -0,0 +1,35 @@
+- name: Upload script for ai.translate
+  ansible.builtin.template:
+    src: containers/ai-translate.sh.j2
+    dest: "{{ app_deploy_target }}/scripts/worker-mq-ai.translate.sh"
+    mode: "0555"
+
+- name: Upload config for ai.translate
+  ansible.builtin.template:
+    src: v2/ai-translate.toml.j2
+    dest: "{{ app_deploy_target }}/ai-translate/config.toml"
+    mode: "0555"
+
+- name: Stop ai.translate
+  containers.podman.podman_container:
+    name: "{{ app_domain }}-worker-mq-ai.translate"
+    state: absent
+
+- name: Create ai.translate container
+  containers.podman.podman_container:
+    name: "{{ app_domain }}-worker-mq-ai.translate"
+    image: "mint-python-3.13"
+    command: "{{ app_deploy_target }}/scripts/worker-mq-ai.translate.sh"
+    volumes:
+      - "{{ app_deploy_target }}/ai-translate:{{ app_deploy_target }}:z"
+    workdir: "{{ app_deploy_target }}"
+    state: present
+    auto_remove: true
+    generate_systemd:
+      path: "{{ ansible_env.HOME }}/.config/systemd/user"
+      container_prefix: "{{ app_container_prefix }}"
+      new: true
+      names: true
+      restart_policy: always
+      restart_sec: 10
+      stop_timeout: 5

+ 2 - 1
deploy/roles/mint-v2.1/tasks/workers.yml → deploy/roles/mint-v2.1/tasks/laravel-workers.yml

@@ -1,6 +1,6 @@
 - name: Upload script for worker-{{ zone_name }}-{{ worker_name }}
   ansible.builtin.template:
-    src: containers/worker.sh.j2
+    src: containers/laravel-worker.sh.j2
     dest: "{{ app_deploy_target }}/scripts/worker-{{ zone_name }}-{{ worker_name }}.sh"
     mode: "0555"
 
@@ -16,6 +16,7 @@
     command: "{{ app_deploy_target }}/scripts/worker-{{ zone_name }}-{{ worker_name }}.sh"
     volumes:
       - "{{ app_deploy_target }}:{{ app_deploy_target }}:z"
+      - "/srv/{{ app_domain }}/clove:/srv/{{ app_domain }}/clove:z"
     workdir: "{{ app_deploy_target }}"
     state: present
     auto_remove: true

+ 2 - 3
deploy/roles/mint-v2.1/tasks/laravel.yml

@@ -32,8 +32,8 @@
     workdir: "{{ app_deploy_target }}"
     auto_remove: true
 
-- name: Setup background worker
-  ansible.builtin.include_tasks: workers.yml
+- name: Setup background php worker
+  ansible.builtin.include_tasks: laravel-workers.yml
   vars:
     zone_name: mq
   loop:
@@ -43,7 +43,6 @@
     - wbw.analyses
     - export.pali.chapter
     - export.article
-    # - ai.translate
   loop_control:
     loop_var: worker_name
 

+ 35 - 0
deploy/roles/mint-v2.1/tasks/openai-proxy.yml

@@ -0,0 +1,35 @@
+- name: Upload script for openai-proxy
+  ansible.builtin.template:
+    src: containers/openai-proxy.sh.j2
+    dest: "{{ app_deploy_target }}/scripts/openai-proxy.sh"
+    mode: "0555"
+
+- name: Upload config for openai-proxy
+  ansible.builtin.template:
+    src: v2/openai-proxy.json.j2
+    dest: "{{ app_deploy_target }}/open-ai-server/config.json"
+    mode: "0555"
+
+- name: Stop openai-proxy server
+  containers.podman.podman_container:
+    name: "{{ app_domain }}-openai.proxy"
+    state: absent
+
+- name: Create openai-proxy container
+  containers.podman.podman_container:
+    name: "{{ app_domain }}-openai.proxy"
+    image: "mint-nodejs-jod"
+    command: "{{ app_deploy_target }}/scripts/openai-proxy.sh"
+    volumes:
+      - "{{ app_deploy_target }}/open-ai-server:{{ app_deploy_target }}:z"
+    workdir: "{{ app_deploy_target }}"
+    state: present
+    auto_remove: true
+    generate_systemd:
+      path: "{{ ansible_env.HOME }}/.config/systemd/user"
+      container_prefix: "{{ app_container_prefix }}"
+      new: true
+      names: true
+      restart_policy: always
+      restart_sec: 10
+      stop_timeout: 5

+ 14 - 0
deploy/roles/mint-v2.1/templates/containers/ai-translate.sh.j2

@@ -0,0 +1,14 @@
+#!/bin/bash
+
+set -e
+
+export WORK_DIR="{{ app_deploy_target }}"
+
+source $HOME/python3/bin/activate
+
+cd $WORK_DIR/
+# https://github.com/iapt-platform/mint/blob/df8e1cf7ade16d17add360e7a869540c1ddaf1b9/api-v8/config/mint.php#L129
+python3 -m ai_translate -c config.toml -n ai.translate -q ai_translate_v2
+
+# NOTE: removed stray "ai_translate_v2" line — it is the queue name, not an executable; under `set -e` it would abort the script
+exit 0

+ 0 - 0
deploy/roles/mint-v2.1/templates/containers/worker.sh.j2 → deploy/roles/mint-v2.1/templates/containers/laravel-worker.sh.j2


+ 13 - 0
deploy/roles/mint-v2.1/templates/containers/openai-proxy.sh.j2

@@ -0,0 +1,13 @@
+#!/bin/bash
+
+set -e
+
+export WORK_DIR="{{ app_deploy_target }}"
+
+export NVM_DIR="$HOME/.nvm"
+source "$NVM_DIR/nvm.sh"
+
+cd $WORK_DIR/
+node dist/main-*.js config.json
+
+exit 0

+ 1 - 1
deploy/roles/mint-v2.1/templates/containers/shell.sh.j2

@@ -1,3 +1,3 @@
 #!/bin/sh
 
-podman run --rm -it --events-backend=file --hostname=mint --network host -w {{ app_deploy_target }} -v {{ app_deploy_target }}:{{ app_deploy_target }}:z {{ app_mint_image_name }} /bin/bash -l
+podman run --rm -it --events-backend=file --hostname=mint --network host -w {{ app_deploy_target }} -v /srv/{{ app_domain }}/clove:/srv/{{ app_domain }}/clove:z -v {{ app_deploy_target }}:{{ app_deploy_target }}:z {{ app_mint_image_name }} /bin/bash -l

+ 16 - 0
deploy/roles/mint-v2.1/templates/v2/ai-translate.toml.j2

@@ -0,0 +1,16 @@
+[rabbitmq]
+host = '{{ app_rabbitmq_host }}'
+port = {{ app_rabbitmq_port }}
+user = '{{ app_rabbitmq_user }}'
+password = '{{ app_rabbitmq_password }}'
+virtual-host = '{{ app_rabbitmq_virtual_host }}'
+customer-timeout = 3600
+
+[redis]
+namespace = '{{ app_domain }}://'
+host = '{{ app_redis_host }}'
+port = {{ app_redis_port }}
+
+[app]
+api-url = '{{ app_openai_proxy_server }}/api'
+openai-proxy-url = '{{ app_openai_proxy_server }}/api/openai'

+ 5 - 0
deploy/roles/mint-v2.1/templates/v2/openai-proxy.json.j2

@@ -0,0 +1,5 @@
+{ 
+    "port": 4000, 
+    "debug": false, 
+    "api-server": "{{ app_api_server }}" 
+}

+ 5 - 1
open-ai-server/config.orig.json

@@ -1 +1,5 @@
-{ "port": 4000, "debug": true, "api_server": "http://staging.wikipali.org/api" }
+{
+  "port": 4000,
+  "debug": true,
+  "api-url": "https://staging.wikipali.org/api"
+}

+ 1 - 0
open-ai-server/src/index.js

@@ -7,4 +7,5 @@ server.listen(port, () => {
   logger.info("Server is running on port %d", port);
   logger.info("Health check: http://0.0.0.0:%d/health", port);
   logger.info("API endpoint: http://0.0.0.0:%d/api/openai", port);
+  logger.info("API Server Url: %s", config["api-url"]);
 });

+ 4 - 2
open-ai-server/src/server.js

@@ -10,7 +10,7 @@ const app = express();
 // 中间件
 app.use(cors());
 app.use(express.json());
-const api_server = config["api_server"];
+const api_url = config["api-url"];
 // POST 路由处理OpenAI请求
 app.post("/api/openai", async (req, res) => {
   try {
@@ -29,7 +29,7 @@ app.post("/api/openai", async (req, res) => {
     } else {
       //get model info from api server
       try {
-        const url = api_server + `/v2/ai-model/${model_id}`;
+        const url = api_url + `/v2/ai-model/${model_id}`;
         logger.info("get model info from api server " + url);
         const response = await fetch(url, {
           method: "GET",
@@ -79,6 +79,8 @@ app.post("/api/openai", async (req, res) => {
       headers["x-api-key"] = apiKey;
       headers["anthropic-version"] = "2023-06-01";
     }
+
+    logger.info("request " + requestUrl);
     if (isStreaming) {
       // 流式响应处理
       res.setHeader("Content-Type", "text/event-stream");