Просмотр исходного кода

Merge branch 'iapt-platform:development' into development

visuddhinanda 9 месяцев назад
Родитель
Commit
beb576e0ae

+ 2 - 1
ai-translate/docker/Dockerfile

@@ -30,7 +30,8 @@ RUN echo 'source $HOME/python3/bin/activate' >> $HOME/.bashrc
 
 # https://pip.pypa.io/en/stable/installation/#get-pip-py
 ADD --chown=deploy https://bootstrap.pypa.io/get-pip.py /opt/
-RUN bash -c "source $HOME/python3/bin/activate && python3 /opt/get-pip.py"
+RUN bash -c ". $HOME/python3/bin/activate && python3 /opt/get-pip.py"
+RUN bash -c ". $HOME/python3/bin/activate && pip install pika requests redis[hiredis] openai"
 
 # COPY --chown=deploy README.md pyproject.toml /opt/ai-translate/
 # COPY --chown=deploy ai_translate /opt/ai-translate/ai_translate

+ 1 - 1
ai-translate/docker/build.sh

@@ -8,7 +8,7 @@ if [ "$#" -ne 1 ]; then
 fi
 
 export VERSION=$(date "+%4Y%m%d%H%M%S")
-export CODE="mint-python$1"
+export CODE="mint-python-$1"
 export TAR="$CODE-$VERSION-$(uname -m)"
 
 podman pull ubuntu:latest

+ 1 - 1
ai-translate/docker/run.sh

@@ -5,4 +5,4 @@ if [ "$#" -ne 1 ]; then
     exit 1
 fi
 
-podman run --rm -it --events-backend=file --hostname=palm --network host -v $PWD:/srv:z "mint-python$1"
+podman run --rm -it --events-backend=file --hostname=palm --network host -v $PWD:/srv:z "mint-python-$1"

+ 1 - 1
api-v12/docker/build.sh

@@ -8,7 +8,7 @@ if [ "$#" -ne 1 ]; then
 fi
 
 export VERSION=$(date "+%4Y%m%d%H%M%S")
-export CODE="mint-php$1"
+export CODE="mint-php-$1"
 export TAR="$CODE-$VERSION-$(uname -m)"
 
 podman pull ubuntu:latest

+ 1 - 1
api-v12/docker/run.sh

@@ -5,4 +5,4 @@ if [ "$#" -ne 1 ]; then
     exit 1
 fi
 
-podman run --rm -it --events-backend=file --hostname=palm --network host -v $PWD:/srv:z "mint-php$1"
+podman run --rm -it --events-backend=file --hostname=palm --network host -v $PWD:/srv:z "mint-php-$1"

+ 4 - 2
open-ai-server/.gitignore

@@ -1,2 +1,4 @@
-/node_modules
-package-lock.json
+/node_modules/
+/dist/
+/package-lock.json
+/config.json

+ 5 - 7
open-ai-server/README.md

@@ -12,13 +12,11 @@ npm install
 
 1. **启动服务器**:
 
-```bash
-# 开发模式(自动重启)
-npm run dev
+`bash
+npm run build
+node dist/main-XXX.js config.json
 
-# 生产模式
-npm start
-```
+````
 
 ## 主要功能特点
 
@@ -48,7 +46,7 @@ npm start
     "stream": true // 可选,启用流式响应
   }
 }
-```
+````
 
 **非流式响应**:返回完整的 JSON 结果
 **流式响应**:返回 Server-Sent Events 格式的实时数据流

+ 1 - 0
open-ai-server/config.orig.json

@@ -0,0 +1 @@
+{ "port": 4000, "debug": true }

+ 2 - 0
open-ai-server/docker/.gitignore

@@ -0,0 +1,2 @@
+*.tar
+*.md5

+ 33 - 0
open-ai-server/docker/Dockerfile

@@ -0,0 +1,33 @@
+FROM ubuntu:latest
+LABEL maintainer="Kassapa"
+
+ENV DEBIAN_FRONTEND noninteractive
+
+# https://nodejs.org/en/about/previous-releases
+ARG NODEJS_VERSION="jod"
+
+RUN apt update
+RUN apt -y upgrade
+RUN apt -y install lsb-release curl wget git vim locales locales-all tzdata build-essential
+RUN apt -y autoremove
+RUN apt -y clean
+
+RUN echo "en_US.UTF-8 UTF-8" > /etc/locale.gen
+RUN locale-gen
+RUN update-locale LANG=en_US.UTF-8
+RUN update-alternatives --set editor /usr/bin/vim.basic
+
+# https://github.com/nvm-sh/nvm
+ENV NVM_VERSION "v0.40.3"
+RUN git clone -b ${NVM_VERSION} https://github.com/nvm-sh/nvm.git $HOME/.nvm
+RUN echo 'export NVM_DIR="$HOME/.nvm"' >> $HOME/.bashrc
+RUN echo '[ -s "$NVM_DIR/nvm.sh" ] && \. "$NVM_DIR/nvm.sh"' >> $HOME/.bashrc
+RUN echo '[ -s "$NVM_DIR/bash_completion" ] && \. "$NVM_DIR/bash_completion"' >> $HOME/.bashrc
+RUN bash -c ". $HOME/.nvm/nvm.sh && nvm install --lts=${NODEJS_VERSION}"
+
+RUN echo "$(date -u +%4Y%m%d%H%M%S)" | tee /VERSION
+
+VOLUME /srv
+WORKDIR /srv
+
+CMD ["/bin/bash", "-l"]

+ 21 - 0
open-ai-server/docker/build.sh

@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -e
+
+if [ "$#" -ne 1 ]; then
+    echo "USAGE: $0 NODEJS_VERSION"
+    exit 1
+fi
+
+export VERSION=$(date "+%4Y%m%d%H%M%S")
+export CODE="mint-nodejs-$1"
+export TAR="$CODE-$VERSION-$(uname -m)"
+
+podman pull ubuntu:latest
+podman build --build-arg NODEJS_VERSION=$1 -t $CODE .
+podman save --format=oci-archive -o $TAR.tar $CODE
+md5sum $TAR.tar >$TAR.md5
+
+echo "done($TAR.tar)."
+
+exit 0

+ 8 - 0
open-ai-server/docker/run.sh

@@ -0,0 +1,8 @@
+#!/bin/bash
+
+if [ "$#" -ne 1 ]; then
+    echo "USAGE: $0 NODEJS_VERSION"
+    exit 1
+fi
+
+podman run --rm -it --events-backend=file --hostname=palm --network host -v $PWD:/srv:z "mint-nodejs-$1"

+ 11 - 6
open-ai-server/package.json

@@ -1,11 +1,13 @@
 {
   "name": "openai-proxy-api",
-  "version": "1.0.0",
+  "version": "2025.7.4",
   "description": "RESTful API proxy for OpenAI with streaming support",
-  "main": "server.js",
+  "main": "index.js",
+  "private": true,
   "scripts": {
-    "start": "node server.js",
-    "dev": "nodemon server.js",
+    "start": "node main.js",
+    "dev": "nodemon main.js",
+    "build": "NODE_ENV=production webpack --config webpack.config.js",
     "test": "echo \"Error: no test specified\" && exit 1"
   },
   "keywords": [
@@ -19,12 +21,15 @@
   "author": "",
   "license": "MIT",
   "dependencies": {
+    "cors": "^2.8.5",
     "express": "^4.18.2",
     "openai": "^4.20.1",
-    "cors": "^2.8.5"
+    "pino": "^9.7.0",
+    "pino-pretty": "^13.0.0"
   },
   "devDependencies": {
-    "nodemon": "^3.0.1"
+    "webpack": "^5.99.9",
+    "webpack-cli": "^6.0.1"
   },
   "engines": {
     "node": ">=16.0.0"

+ 22 - 0
open-ai-server/src/index.js

@@ -0,0 +1,22 @@
+import progress from "node:process";
+import fs from "node:fs";
+
+import logger from "./logger";
+import server from "./server";
+
+if (progress.argv.length !== 3) {
+  logger.error("USAGE: node main-XXX.js config.json");
+  process.exit(1);
+}
+
+const config_file = progress.argv[2];
+const config = JSON.parse(fs.readFileSync(config_file, "utf8"));
+logger.info(`load config from ${config_file}`);
+logger.debug("run on debug mode");
+
+const port = config["port"];
+server.listen(port, () => {
+  logger.info("Server is running on port %d", port);
+  logger.info("Health check: http://0.0.0.0:%d/health", port);
+  logger.info("API endpoint: http://0.0.0.0:%d/api/openai", port);
+});

+ 14 - 0
open-ai-server/src/logger.js

@@ -0,0 +1,14 @@
+import pino from "pino";
+import pretty from "pino-pretty";
+
+const stream = pretty({
+  colorize: true,
+});
+const logger = pino(
+  {
+    level: "debug",
+  },
+  stream
+);
+
+export default logger;

+ 10 - 16
open-ai-server/server.js → open-ai-server/src/server.js

@@ -1,9 +1,10 @@
-const express = require("express");
-const OpenAI = require("openai");
-const cors = require("cors");
+import express from "express";
+import OpenAI from "openai";
+import cors from "cors";
+
+import logger from "./logger";
 
 const app = express();
-const PORT = process.env.PORT || 3000;
 
 // 中间件
 app.use(cors());
@@ -13,7 +14,7 @@ app.use(express.json());
 app.post("/api/openai", async (req, res) => {
   try {
     const { open_ai_url, api_key, payload } = req.body;
-    console.debug("request", open_ai_url);
+    logger.debug("request %s", open_ai_url);
     // 验证必需的参数
     if (!open_ai_url || !api_key || !payload) {
       return res.status(400).json({
@@ -42,7 +43,7 @@ app.post("/api/openai", async (req, res) => {
           ...payload,
           stream: true,
         });
-        console.info("waiting response");
+        logger.info("waiting response");
         for await (const chunk of stream) {
           const data = JSON.stringify(chunk);
           res.write(`data: ${data}\n\n`);
@@ -51,7 +52,7 @@ app.post("/api/openai", async (req, res) => {
         res.write("data: [DONE]\n\n");
         res.end();
       } catch (streamError) {
-        console.error("Streaming error:", streamError);
+        logger.error("Streaming error: %s", streamError);
         res.write(
           `data: ${JSON.stringify({ error: streamError.message })}\n\n`
         );
@@ -64,7 +65,7 @@ app.post("/api/openai", async (req, res) => {
       res.json(completion);
     }
   } catch (error) {
-    console.error("API Error:", error);
+    logger.error("API Error: %s", error);
 
     // 处理不同类型的错误
     if (error.status) {
@@ -86,11 +87,4 @@ app.get("/health", (req, res) => {
   res.json({ status: "OK", timestamp: new Date().toISOString() });
 });
 
-// 启动服务器
-app.listen(PORT, () => {
-  console.log(`Server is running on port ${PORT}`);
-  console.log(`Health check: http://localhost:${PORT}/health`);
-  console.log(`API endpoint: http://localhost:${PORT}/api/openai`);
-});
-
-module.exports = app;
+export default app;

+ 12 - 0
open-ai-server/webpack.config.js

@@ -0,0 +1,12 @@
+const path = require("path");
+
+module.exports = {
+  entry: "./src/index.js",
+  output: {
+    path: path.resolve(__dirname, "dist"),
+    filename: "[name].[contenthash].js",
+    clean: true,
+  },
+  optimization: { runtimeChunk: "single" },
+  target: "node",
+};