2
0
visuddhinanda 9 months ago
parent
commit
5ee11932a8

+ 6 - 3
ai-translate/ai_translate/__init__.py

@@ -20,7 +20,7 @@ def open_redis_cluster(config):
     return (cli, config['namespace'])
     return (cli, config['namespace'])
 
 
 
 
-def start_consumer(context, name, config, queue, callback):
+def start_consumer(context, name, config, queue, callback,proxy):
     logger.debug("open rabbitmq %s@%s:%d/%s with timeout %ds",
     logger.debug("open rabbitmq %s@%s:%d/%s with timeout %ds",
                  config['user'], config['host'], config['port'], config['virtual-host'], config['customer-timeout'])
                  config['user'], config['host'], config['port'], config['virtual-host'], config['customer-timeout'])
     connection = pika.BlockingConnection(
     connection = pika.BlockingConnection(
@@ -37,7 +37,7 @@ def start_consumer(context, name, config, queue, callback):
         handle_message(context, ch, method, properties.message_id,
         handle_message(context, ch, method, properties.message_id,
                        properties.content_type, json.loads(
                        properties.content_type, json.loads(
                            body, object_hook=SimpleNamespace),
                            body, object_hook=SimpleNamespace),
-                       callback, config['customer-timeout'])
+                       callback,proxy, config['customer-timeout'])
 
 
     channel.basic_consume(
     channel.basic_consume(
         queue=queue, on_message_callback=_callback, auto_ack=False)
         queue=queue, on_message_callback=_callback, auto_ack=False)
@@ -52,5 +52,8 @@ def launch(name, queue, config_file):
         config = tomllib.load(config_fd)
         config = tomllib.load(config_fd)
         logger.debug('api-url:(%s)', config['app']['api-url'])
         logger.debug('api-url:(%s)', config['app']['api-url'])
         redis_cli = open_redis_cluster(config['redis'])
         redis_cli = open_redis_cluster(config['redis'])
+        openai_proxy = config['app'].get('openai-proxy', None)
         start_consumer(redis_cli, name,
         start_consumer(redis_cli, name,
-                       config['rabbitmq'], queue, config['app']['api-url'])
+                       config['rabbitmq'], 
+                       queue, config['app']['api-url'], 
+                       openai_proxy)

+ 20 - 7
ai-translate/ai_translate/service.py

@@ -106,7 +106,7 @@ class Message:
 class AiTranslateService:
 class AiTranslateService:
     """AI翻译服务"""
     """AI翻译服务"""
 
 
-    def __init__(self, redis, ch, method, api_url, customer_timeout):
+    def __init__(self, redis, ch, method, api_url, openai_proxy,customer_timeout):
         self.queue = 'ai_translate'
         self.queue = 'ai_translate'
         self.model_token = None
         self.model_token = None
         self.task = None
         self.task = None
@@ -119,6 +119,7 @@ class AiTranslateService:
         self.customer_timeout = customer_timeout
         self.customer_timeout = customer_timeout
         self.channel = ch
         self.channel = ch
         self.maxProcessTime = 15 * 60  # 一个句子的最大处理时间
         self.maxProcessTime = 15 * 60  # 一个句子的最大处理时间
+        self.openai_proxy=openai_proxy 
 
 
     def process_translate(self, message_id: str, body: Message) -> bool:
     def process_translate(self, message_id: str, body: Message) -> bool:
         """处理翻译任务"""
         """处理翻译任务"""
@@ -355,12 +356,24 @@ class AiTranslateService:
 
 
         while attempt < max_retries:
         while attempt < max_retries:
             try:
             try:
-                response = requests.post(
-                    message.model.url,
-                    json=param,
-                    headers=headers,
-                    timeout=self.llm_timeout
-                )
+                if self.openai_proxy:
+                    response = requests.post(
+                        self.openai_proxy,
+                        json={
+                            "open_ai_url": message.model.url,
+                            "api_key": message.model.key,
+                            'payload':param,
+                        },
+                        headers=headers,
+                        timeout=self.llm_timeout
+                    )
+                else:
+                    response = requests.post(
+                        message.model.url,
+                        json=param,
+                        headers=headers,
+                        timeout=self.llm_timeout
+                    )
                 response.raise_for_status()
                 response.raise_for_status()
 
 
                 logger.info(f'{self.queue} LLM request successful')
                 logger.info(f'{self.queue} LLM request successful')

+ 1 - 1
ai-translate/ai_translate/worker.py

@@ -7,7 +7,7 @@ from .utils import is_stopped
 logger = logging.getLogger(__name__)
 logger = logging.getLogger(__name__)
 
 
 
 
-def handle_message(redis, ch, method, id, content_type, body, api_url: str, customer_timeout: int):
+def handle_message(redis, ch, method, id, content_type, body, api_url: str,openai_proxy:str, customer_timeout: int):
     MaxRetry: int = 3
     MaxRetry: int = 3
     try:
     try:
         logger.info("process message start (%s) messages", len(body.payload))
         logger.info("process message start (%s) messages", len(body.payload))

+ 1 - 0
ai-translate/config.orig.toml

@@ -13,3 +13,4 @@ port = 6371
 
 
 [app]
 [app]
 api-url = 'http://127.0.0.1:8000/api'
 api-url = 'http://127.0.0.1:8000/api'
+openai-proxy = 'http://localhost:4000/api/openai'