Sfoglia il codice sorgente

:construction: use webpack to build open-ai-server project

Jeremy Zheng 9 mesi fa
parent
commit
e4519c2952

+ 4 - 2
open-ai-server/.gitignore

@@ -1,2 +1,4 @@
-/node_modules
-package-lock.json
+/node_modules/
+/dist/
+/package-lock.json
+/config.json

+ 5 - 7
open-ai-server/README.md

@@ -12,13 +12,11 @@ npm install
 
 1. **启动服务器**:
 
-```bash
-# 开发模式(自动重启)
-npm run dev
+```bash
+npm run build
+node dist/main-XXX.js config.json
 
-# 生产模式
-npm start
-```
+```
 
 ## 主要功能特点
 
@@ -48,7 +46,7 @@ npm start
     "stream": true // 可选,启用流式响应
   }
 }
-```
+```
 
 **非流式响应**:返回完整的 JSON 结果
 **流式响应**:返回 Server-Sent Events 格式的实时数据流

+ 1 - 0
open-ai-server/config.orig.json

@@ -0,0 +1 @@
+{ "port": 4000, "debug": true }

+ 11 - 6
open-ai-server/package.json

@@ -1,11 +1,13 @@
 {
   "name": "openai-proxy-api",
-  "version": "1.0.0",
+  "version": "2025.7.4",
   "description": "RESTful API proxy for OpenAI with streaming support",
-  "main": "server.js",
+  "main": "index.js",
+  "private": true,
   "scripts": {
-    "start": "node server.js",
-    "dev": "nodemon server.js",
+    "start": "node main.js",
+    "dev": "nodemon main.js",
+    "build": "NODE_ENV=production webpack --config webpack.config.js",
     "test": "echo \"Error: no test specified\" && exit 1"
   },
   "keywords": [
@@ -19,12 +21,15 @@
   "author": "",
   "license": "MIT",
   "dependencies": {
+    "cors": "^2.8.5",
     "express": "^4.18.2",
     "openai": "^4.20.1",
-    "cors": "^2.8.5"
+    "pino": "^9.7.0",
+    "pino-pretty": "^13.0.0"
   },
   "devDependencies": {
-    "nodemon": "^3.0.1"
+    "webpack": "^5.99.9",
+    "webpack-cli": "^6.0.1"
   },
   "engines": {
     "node": ">=16.0.0"

+ 22 - 0
open-ai-server/src/index.js

@@ -0,0 +1,22 @@
+import progress from "node:process";
+import fs from "node:fs";
+
+import logger from "./logger";
+import server from "./server";
+
+if (progress.argv.length !== 3) {
+  logger.error("USAGE: node main-XXX.js config.json");
+  process.exit(1);
+}
+
+const config_file = progress.argv[2];
+const config = JSON.parse(fs.readFileSync(config_file, "utf8"));
+logger.info(`load config from ${config_file}`);
+logger.debug("run on debug mode");
+
+const port = config["port"];
+server.listen(port, () => {
+  logger.info("Server is running on port %d", port);
+  logger.info("Health check: http://0.0.0.0:%d/health", port);
+  logger.info("API endpoint: http://0.0.0.0:%d/api/openai", port);
+});

+ 14 - 0
open-ai-server/src/logger.js

@@ -0,0 +1,14 @@
import pino from "pino";
import pretty from "pino-pretty";

// Shared application logger: pino with colorized pretty-printing to stdout.
const stream = pretty({
  colorize: true,
});

const logger = pino(
  {
    // Allow the level to be overridden at runtime (e.g. LOG_LEVEL=info in
    // production); defaults to the original hard-coded "debug".
    level: process.env.LOG_LEVEL ?? "debug",
  },
  stream
);

export default logger;

+ 10 - 16
open-ai-server/server.js → open-ai-server/src/server.js

@@ -1,9 +1,10 @@
-const express = require("express");
-const OpenAI = require("openai");
-const cors = require("cors");
+import express from "express";
+import OpenAI from "openai";
+import cors from "cors";
+
+import logger from "./logger";
 
 const app = express();
-const PORT = process.env.PORT || 3000;
 
 // 中间件
 app.use(cors());
@@ -13,7 +14,7 @@ app.use(express.json());
 app.post("/api/openai", async (req, res) => {
   try {
     const { open_ai_url, api_key, payload } = req.body;
-    console.debug("request", open_ai_url);
+    logger.debug("request %s", open_ai_url);
     // 验证必需的参数
     if (!open_ai_url || !api_key || !payload) {
       return res.status(400).json({
@@ -42,7 +43,7 @@ app.post("/api/openai", async (req, res) => {
           ...payload,
           stream: true,
         });
-        console.info("waiting response");
+        logger.info("waiting response");
         for await (const chunk of stream) {
           const data = JSON.stringify(chunk);
           res.write(`data: ${data}\n\n`);
@@ -51,7 +52,7 @@ app.post("/api/openai", async (req, res) => {
         res.write("data: [DONE]\n\n");
         res.end();
       } catch (streamError) {
-        console.error("Streaming error:", streamError);
+        logger.error("Streaming error: %s", streamError);
         res.write(
           `data: ${JSON.stringify({ error: streamError.message })}\n\n`
         );
@@ -64,7 +65,7 @@ app.post("/api/openai", async (req, res) => {
       res.json(completion);
     }
   } catch (error) {
-    console.error("API Error:", error);
+    logger.error("API Error: %s", error);
 
     // 处理不同类型的错误
     if (error.status) {
@@ -86,11 +87,4 @@ app.get("/health", (req, res) => {
   res.json({ status: "OK", timestamp: new Date().toISOString() });
 });
 
-// 启动服务器
-app.listen(PORT, () => {
-  console.log(`Server is running on port ${PORT}`);
-  console.log(`Health check: http://localhost:${PORT}/health`);
-  console.log(`API endpoint: http://localhost:${PORT}/api/openai`);
-});
-
-module.exports = app;
+export default app;

+ 12 - 0
open-ai-server/webpack.config.js

@@ -0,0 +1,12 @@
+const path = require("path");
+
+module.exports = {
+  entry: "./src/index.js",
+  output: {
+    path: path.resolve(__dirname, "dist"),
+    filename: "[name].[contenthash].js",
+    clean: true,
+  },
+  optimization: { runtimeChunk: "single" },
+  target: "node",
+};