NodeJS: unable to get a word-by-word response from the GPT API

Asked by sg24os4d on 2023-03-22 in Node.js

I'm trying to get the response from my API generated word by word, the way ChatGPT does, rather than all at once. I have everything else working and get the expected response, just not in chunks.
I can print the partial responses to the console, but I can't display them in the UI. Can anyone help?
Here is my backend code:

import express from "express";
import { ChatGPTAPI } from "chatgpt";

// Assumption: the API key is read from the environment.
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;

const app = express();
app.use(express.json());

app.post("/", async (req, res) => {
  const { message } = req.body;
  const api = new ChatGPTAPI({
    apiKey: OPENAI_API_KEY,
  });

  // onProgress fires for every partial response, but it only logs on the
  // server; nothing is written to `res`, so the client never sees the chunks.
  const resp = await api.sendMessage(message, {
    onProgress: (partialResponse) => {
      console.log(partialResponse);
    },
  });

  // Code for sending the response all at once
  // if (resp.text) {
  //   res.json({
  //     message: resp.text,
  //   });
  // }
});

const server = app.listen(5000, () => {
  console.log("app listening");
});

server.headersTimeout = 610000;

And this is how I fetch it on the frontend:

const handleSubmit = (e) => {
  e.preventDefault();

  fetch("http://localhost:5000", {
    method: "POST",
    headers: {
      "Content-Type": "application/json",
    },
    body: JSON.stringify({ message }),
  })
    // res.json() resolves only after the full response body has arrived,
    // so the UI updates once at the end rather than chunk by chunk.
    .then((res) => res.json())
    .then((data) => {
      setResponse(data.message);
      setMessage("");
    });
};
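
For reference, here is a minimal sketch of one way the partial responses could be forwarded from the Express route to the browser and rendered as they arrive. The res.write()/res.end() wiring and the getReader() loop are illustrative assumptions, not code from the original post:

// Backend sketch: write each new piece of text to the open HTTP response.
app.post("/", async (req, res) => {
  const { message } = req.body;
  const api = new ChatGPTAPI({ apiKey: OPENAI_API_KEY });

  res.setHeader("Content-Type", "text/plain; charset=utf-8");

  let sent = "";
  await api.sendMessage(message, {
    onProgress: (partialResponse) => {
      // partialResponse.text holds the text generated so far,
      // so send only the part that has not been written yet.
      const delta = partialResponse.text.slice(sent.length);
      sent = partialResponse.text;
      res.write(delta);
    },
  });

  res.end();
});

// Frontend sketch: read the body as a stream instead of calling res.json().
const handleSubmit = async (e) => {
  e.preventDefault();

  const response = await fetch("http://localhost:5000", {
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify({ message }),
  });

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let text = "";
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    text += decoder.decode(value, { stream: true });
    setResponse(text); // update the UI with every chunk
  }
  setMessage("");
};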
Answer #1 (vxf3dgd4)

Both the GPT-3 API and the ChatGPT API (i.e., the GPT-3.5 API) need the stream parameter set to true (the default is false) in order to get the response *word by word*.

NodeJS

Working example using the ChatGPT API

If you run test.js, the OpenAI API will return the following completion:

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"role":"assistant"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":"\n\n"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":"Hello"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":" there"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":"!"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":" How"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":" can"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":" I"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":" assist"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":" you"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":" today"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{"content":"?"},"index":0,"finish_reason":null}]}

data: {"id":"chatcmpl-6wX08lq8WfVmjhRkMyEIM2WvTByTg","object":"chat.completion.chunk","created":1679407900,"model":"gpt-3.5-turbo-0301","choices":[{"delta":{},"index":0,"finish_reason":"stop"}]}

data: [DONE]

test.js

const { Configuration, OpenAIApi } = require('openai');

const configuration = new Configuration({
  apiKey: process.env.OPENAI_API_KEY,
});

const openai = new OpenAIApi(configuration);

async function getCompletionFromOpenAI() {
  const completionStream = await openai.createChatCompletion({
    model: 'gpt-3.5-turbo',
    messages: [{ role: 'user', content: 'Hello!' }],
    temperature: 0,
    // stream: true makes the API return the completion as server-sent events
    // ("data: {...}" chunks) instead of a single JSON object.
    stream: true,
  });

  // Logs the raw SSE payload shown above.
  console.log(completionStream.data);
}

getCompletionFromOpenAI();
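
The snippet above logs the raw SSE payload in one go. To process the chunks as they arrive, the request can be made with axios's responseType: 'stream' option and each "data:" line parsed as it comes in. A minimal sketch, assuming well-formed "data:" lines per chunk (the function name streamCompletionFromOpenAI is illustrative):

const { Configuration, OpenAIApi } = require('openai');

const openai = new OpenAIApi(
  new Configuration({ apiKey: process.env.OPENAI_API_KEY })
);

async function streamCompletionFromOpenAI() {
  const completion = await openai.createChatCompletion(
    {
      model: 'gpt-3.5-turbo',
      messages: [{ role: 'user', content: 'Hello!' }],
      temperature: 0,
      stream: true,
    },
    { responseType: 'stream' } // axios option: expose the raw response stream
  );

  completion.data.on('data', (chunk) => {
    // Each chunk can contain one or more "data: {...}" lines.
    const lines = chunk
      .toString()
      .split('\n')
      .filter((line) => line.trim().startsWith('data:'));

    for (const line of lines) {
      const payload = line.replace(/^data:\s*/, '');
      if (payload === '[DONE]') return;
      const delta = JSON.parse(payload).choices[0].delta;
      if (delta.content) process.stdout.write(delta.content); // prints word by word
    }
  });
}

streamCompletionFromOpenAI();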
