visuddhinanda 9 månader sedan
förälder
incheckning
17049145de
4 ändrade filer med 322 tillägg och 0 borttagningar
  1. 2 0
      open-ai-server/.gitignore
  2. 189 0
      open-ai-server/README.md
  3. 32 0
      open-ai-server/package.json
  4. 99 0
      open-ai-server/server.js

+ 2 - 0
open-ai-server/.gitignore

@@ -0,0 +1,2 @@
+/node_modules
+package-lock.json

+ 189 - 0
open-ai-server/README.md

@@ -0,0 +1,189 @@
+# OpenAI Proxy Server
+
+这个 API 接收 POST 请求,使用 OpenAI SDK 调用上游 API,并支持流式响应。项目包含 package.json(声明所需依赖)以及使用示例,展示如何调用这个 API。
+
+## 安装和运行步骤
+
+1. **安装依赖**:
+
+```bash
+npm install
+```
+
+2. **启动服务器**:
+
+```bash
+# 开发模式(自动重启)
+npm run dev
+# 启动服务器(端口4000)
+PORT=4000 npm run dev
+
+# 生产模式
+npm start
+```
+
+## 主要功能特点
+
+**✅ RESTful API**:使用 Express.js 创建 POST 端点 `/api/openai`
+
+**✅ 支持流式响应**:当`payload.stream = true`时启用 Server-Sent Events
+
+**✅ 错误处理**:完整的错误处理和状态码响应
+
+**✅ CORS 支持**:支持跨域请求
+
+**✅ 参数验证**:验证必需的参数
+
+**✅ 健康检查**:提供`/health`端点用于服务监控
+
+## API 使用方法
+
+**请求格式**:
+
+```json
+{
+  "open_ai_url": "https://api.openai.com/v1",
+  "api_key": "your-api-key",
+  "payload": {
+    "model": "gpt-4",
+    "messages": [{ "role": "user", "content": "your message" }],
+    "stream": true // 可选,启用流式响应
+  }
+}
+```
+
+**非流式响应**:返回完整的 JSON 结果
+**流式响应**:返回 Server-Sent Events 格式的实时数据流
+
+服务器会在端口 3000 上运行,你可以通过环境变量`PORT`来修改端口号。
+
+## 测试
+
+```bash
+# 非流式请求
+curl -X POST http://localhost:3000/api/openai \
+  -H "Content-Type: application/json" \
+  -d '{
+    "open_ai_url": "https://api.openai.com/v1",
+    "api_key": "your-api-key-here",
+    "payload": {
+      "model": "gpt-4",
+      "messages": [
+        {
+          "role": "user",
+          "content": "Tell me a three sentence bedtime story about a unicorn."
+        }
+      ],
+      "max_tokens": 150,
+      "temperature": 0.7
+    }
+  }'
+
+# 流式请求
+curl -X POST http://localhost:3000/api/openai \
+  -H "Content-Type: application/json" \
+  -d '{
+    "open_ai_url": "https://api.openai.com/v1",
+    "api_key": "your-api-key-here",
+    "payload": {
+      "model": "gpt-4",
+      "messages": [
+        {
+          "role": "user",
+          "content": "Tell me a three sentence bedtime story about a unicorn."
+        }
+      ],
+      "max_tokens": 150,
+      "temperature": 0.7,
+      "stream": true
+    }
+  }'
+```
+
```javascript
// 1. Non-streaming request example
async function callOpenAIAPI() {
  const response = await fetch("http://localhost:3000/api/openai", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      open_ai_url: "https://api.openai.com/v1",
      api_key: "your-api-key-here",
      payload: {
        model: "gpt-4",
        messages: [
          {
            role: "user",
            content: "Tell me a three sentence bedtime story about a unicorn.",
          },
        ],
        max_tokens: 150,
        temperature: 0.7,
      },
    }),
  });

  const data = await response.json();
  console.log("Non-streaming response:", data);
}

// 2. Streaming request example
async function callOpenAIAPIStreaming() {
  const response = await fetch("http://localhost:3000/api/openai", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({
      open_ai_url: "https://api.openai.com/v1",
      api_key: "your-api-key-here",
      payload: {
        model: "gpt-4",
        messages: [
          {
            role: "user",
            content: "Tell me a three sentence bedtime story about a unicorn.",
          },
        ],
        max_tokens: 150,
        temperature: 0.7,
        stream: true, // enable streaming response
      },
    }),
  });

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  // SSE events can be split across network chunks; buffer partial lines
  // between reads instead of assuming each chunk ends on a line boundary.
  let buffer = "";

  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;

      // { stream: true } keeps multi-byte characters that straddle a
      // chunk boundary intact.
      buffer += decoder.decode(value, { stream: true });

      // Only consume complete lines; keep the trailing partial line.
      const lines = buffer.split("\n");
      buffer = lines.pop() ?? "";

      for (const line of lines) {
        if (line.startsWith("data: ")) {
          const data = line.slice(6);
          if (data === "[DONE]") {
            console.log("Stream finished");
            return;
          }

          try {
            const parsed = JSON.parse(data);
            console.log("Streaming chunk:", parsed);
          } catch (e) {
            // Ignore non-JSON payloads (e.g. comments/keep-alives).
          }
        }
      }
    }
  } finally {
    reader.releaseLock();
  }
}
```

+ 32 - 0
open-ai-server/package.json

@@ -0,0 +1,32 @@
+{
+  "name": "openai-proxy-api",
+  "version": "1.0.0",
+  "description": "RESTful API proxy for OpenAI with streaming support",
+  "main": "server.js",
+  "scripts": {
+    "start": "node server.js",
+    "dev": "nodemon server.js",
+    "test": "echo \"Error: no test specified\" && exit 1"
+  },
+  "keywords": [
+    "openai",
+    "api",
+    "proxy",
+    "streaming",
+    "nodejs",
+    "express"
+  ],
+  "author": "",
+  "license": "MIT",
+  "dependencies": {
+    "express": "^4.18.2",
+    "openai": "^4.20.1",
+    "cors": "^2.8.5"
+  },
+  "devDependencies": {
+    "nodemon": "^3.0.1"
+  },
+  "engines": {
+    "node": ">=16.0.0"
+  }
+}

+ 99 - 0
open-ai-server/server.js

@@ -0,0 +1,99 @@
const express = require("express");
const cors = require("cors");
const OpenAI = require("openai");

const app = express();
const PORT = process.env.PORT || 3000;

// Middleware: allow cross-origin requests and parse JSON request bodies.
app.use(cors());
app.use(express.json());
+
+// POST 路由处理OpenAI请求
+app.post("/api/openai", async (req, res) => {
+  try {
+    const { open_ai_url, api_key, payload } = req.body;
+
+    // 验证必需的参数
+    if (!open_ai_url || !api_key || !payload) {
+      return res.status(400).json({
+        error: "Missing required parameters: open_ai_url, api_key, or payload",
+      });
+    }
+
+    // 初始化OpenAI客户端
+    const openai = new OpenAI({
+      apiKey: api_key,
+      baseURL: open_ai_url,
+    });
+
+    // 检查是否需要流式响应
+    const isStreaming = payload.stream === true;
+
+    if (isStreaming) {
+      // 流式响应
+      res.setHeader("Content-Type", "text/event-stream");
+      res.setHeader("Cache-Control", "no-cache");
+      res.setHeader("Connection", "keep-alive");
+      res.setHeader("Access-Control-Allow-Origin", "*");
+
+      try {
+        const stream = await openai.chat.completions.create({
+          ...payload,
+          stream: true,
+        });
+        console.info("waiting response");
+        for await (const chunk of stream) {
+          const data = JSON.stringify(chunk);
+          res.write(`data: ${data}\n\n`);
+        }
+
+        res.write("data: [DONE]\n\n");
+        res.end();
+      } catch (streamError) {
+        console.error("Streaming error:", streamError);
+        res.write(
+          `data: ${JSON.stringify({ error: streamError.message })}\n\n`
+        );
+        res.end();
+      }
+    } else {
+      // 非流式响应
+      const completion = await openai.chat.completions.create(payload);
+
+      res.json({
+        success: true,
+        data: completion,
+      });
+    }
+  } catch (error) {
+    console.error("API Error:", error);
+
+    // 处理不同类型的错误
+    if (error.status) {
+      return res.status(error.status).json({
+        error: error.message,
+        type: error.type || "api_error",
+      });
+    }
+
+    res.status(500).json({
+      error: "Internal server error",
+      message: error.message,
+    });
+  }
+});
+
+// 健康检查端点
+app.get("/health", (req, res) => {
+  res.json({ status: "OK", timestamp: new Date().toISOString() });
+});
+
+// 启动服务器
+app.listen(PORT, () => {
+  console.log(`Server is running on port ${PORT}`);
+  console.log(`Health check: http://localhost:${PORT}/health`);
+  console.log(`API endpoint: http://localhost:${PORT}/api/openai`);
+});
+
+module.exports = app;