const express = require("express");
const dotenv = require("dotenv");
const cors = require("cors");
const { Configuration, OpenAIApi } = require("openai");

// Load environment variables (API_KEY) from the .env file.
dotenv.config();

const configuration = new Configuration({
  apiKey: process.env.API_KEY,
});
const openai = new OpenAIApi(configuration);
// console.log(process.env.API_KEY);

const app = express();
app.use(cors());
app.use(express.json());

const PORT = 5001;

// Health-check route.
app.get("/", async (req, res) => {
  res.status(200).send({ message: "Welcome to SpinoTech GPT-3" });
});

// Forward the prompt sent by the frontend to the OpenAI completions API.
app.post("/", async (req, res) => {
  try {
    const data = req.body;
    // console.log(data.data);
    const response = await openai.createCompletion({
      model: "text-davinci-003",
      prompt: data.data,
      temperature: 0,
      max_tokens: 300,
      top_p: 1,
      frequency_penalty: 0.5,
      presence_penalty: 0,
    });
    // console.log(response.data.choices[0].text);
    res.status(200).send({
      bot: response.data.choices[0].text,
    });
  } catch (error) {
    // console.error(error);
    res.status(500).send(error);
  }
});

app.listen(PORT, () => {
  console.log(`GPT backend is running on http://localhost:${PORT}/`);
});
When I console.log data.data before calling openai.createCompletion(), I can see my message, but after that call I get nothing back. I don't even receive a response from openai.createCompletion().
From the frontend I pass an object like { data: "my question for chatGPT" } as the request data. When I make the POST call, the data arrives at the backend, but I get no response from the OpenAI model. Instead it throws an error like this:
Error: Request failed with status code 429
at createError (F:\web devolopment tutorials\react js file\chatGPT\chatgpt_backend\node_modules\axios\lib\core\createError.js:16:15)
at settle (F:\web devolopment tutorials\react js file\chatGPT\chatgpt_backend\node_modules\axios\lib\core\settle.js:17:12)
at IncomingMessage.handleStreamEnd (F:\web devolopment tutorials\react js file\chatGPT\chatgpt_backend\node_modules\axios\lib\adapters\http.js:322:11)
at IncomingMessage.emit (node:events:525:35)
at endReadableNT (node:internal/streams/readable:1359:12)
at process.processTicksAndRejections (node:internal/process/task_queues:82:21) {
config: {
transitional: {
silentJSONParsing: true,
forcedJSONParsing: true,
clarifyTimeoutError: false
},
adapter: [Function: httpAdapter],
transformRequest: [ [Function: transformRequest] ],
transformResponse: [ [Function: transformResponse] ],
timeout: 0,
xsrfCookieName: 'XSRF-TOKEN',
xsrfHeaderName: 'X-XSRF-TOKEN',
maxContentLength: -1,
maxBodyLength: -1,
validateStatus: [Function: validateStatus],
headers: {
Accept: 'application/json, text/plain, */*',
'Content-Type': 'application/json',
'User-Agent': 'OpenAI/NodeJS/3.2.1',
Authorization: 'Bearer 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',
'Content-Length': 130
},
method: 'post',
data: '{"model":"text-davinci-003","prompt":"hh","temperature":0,"max_tokens":300,"top_p":1,"frequency_penalty":0.5,"presence_penalty":0}',
url: 'https://api.openai.com/v1/completions'
},
request: <ref *1> ClientRequest {
_events: [Object: null prototype] {
abort: [Function (anonymous)],
aborted: [Function (anonymous)],
connect: [Function (anonymous)],
error: [Function (anonymous)],
socket: [Function (anonymous)],
timeout: [Function (anonymous)],
finish: [Function: requestOnFinish]
},
_eventsCount: 7,
_maxListeners: undefined,
outputData: [],
outputSize: 0,
writable: true,
destroyed: false,
_last: true,
chunkedEncoding: false,
shouldKeepAlive: false,
maxRequestsOnConnectionReached: false,
_defaultKeepAlive: true,
useChunkedEncodingByDefault: true,
sendDate: false,
_removedConnection: false,
_removedContLen: false,
_removedTE: false,
strictContentLength: false,
_contentLength: 130,
_hasBody: true,
_trailer: '',
finished: true,
_headerSent: true,
_closed: false,
socket: TLSSocket {
_tlsOptions: [Object],
_secureEstablished: true,
_securePending: false,
_newSessionPending: false,
_controlReleased: true,
secureConnecting: false,
_SNICallback: null,
servername: 'api.openai.com',
alpnProtocol: false,
authorized: true,
authorizationError: null,
encrypted: true,
_events: [Object: null prototype],
_eventsCount: 10,
connecting: false,
_hadError: false,
_parent: null,
_host: 'api.openai.com',
_closeAfterHandlingError: false,
_readableState: [ReadableState],
_maxListeners: undefined,
_writableState: [WritableState],
allowHalfOpen: false,
_sockname: null,
_pendingData: null,
_pendingEncoding: '',
server: undefined,
_server: null,
ssl: [TLSWrap],
_requestCert: true,
_rejectUnauthorized: true,
parser: null,
_httpMessage: [Circular *1],
[Symbol(res)]: [TLSWrap],
[Symbol(verified)]: true,
[Symbol(pendingSession)]: null,
[Symbol(async_id_symbol)]: 35,
[Symbol(kHandle)]: [TLSWrap],
[Symbol(lastWriteQueueSize)]: 0,
[Symbol(timeout)]: null,
[Symbol(kBuffer)]: null,
[Symbol(kBufferCb)]: null,
[Symbol(kBufferGen)]: null,
[Symbol(kCapture)]: false,
[Symbol(kSetNoDelay)]: false,
[Symbol(kSetKeepAlive)]: true,
[Symbol(kSetKeepAliveInitialDelay)]: 60,
[Symbol(kBytesRead)]: 0,
[Symbol(kBytesWritten)]: 0,
[Symbol(connect-options)]: [Object]
},
_header: 'POST /v1/completions HTTP/1.1\r\n' +
'Accept: application/json, text/plain, */*\r\n' +
'Content-Type: application/json\r\n' +
'User-Agent: OpenAI/NodeJS/3.2.1\r\n' +
'Authorization: Bearer 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\r\n' +
'Content-Length: 130\r\n' +
'Host: api.openai.com\r\n' +
'Connection: close\r\n' +
'\r\n',
_keepAliveTimeout: 0,
_onPendingData: [Function: nop],
agent: Agent {
_events: [Object: null prototype],
_eventsCount: 2,
_maxListeners: undefined,
defaultPort: 443,
protocol: 'https:',
options: [Object: null prototype],
requests: [Object: null prototype] {},
sockets: [Object: null prototype],
freeSockets: [Object: null prototype] {},
keepAliveMsecs: 1000,
keepAlive: false,
maxSockets: Infinity,
maxFreeSockets: 256,
scheduling: 'lifo',
maxTotalSockets: Infinity,
totalSocketCount: 1,
maxCachedSessions: 100,
_sessionCache: [Object],
[Symbol(kCapture)]: false
},
socketPath: undefined,
method: 'POST',
maxHeaderSize: undefined,
insecureHTTPParser: undefined,
joinDuplicateHeaders: undefined,
path: '/v1/completions',
_ended: true,
res: IncomingMessage {
_readableState: [ReadableState],
_events: [Object: null prototype],
_eventsCount: 4,
_maxListeners: undefined,
socket: [TLSSocket],
httpVersionMajor: 1,
httpVersionMinor: 1,
httpVersion: '1.1',
complete: true,
rawHeaders: [Array],
rawTrailers: [],
joinDuplicateHeaders: undefined,
aborted: false,
upgrade: false,
url: '',
method: null,
statusCode: 429,
statusMessage: 'Too Many Requests',
client: [TLSSocket],
_consuming: false,
_dumped: false,
req: [Circular *1],
responseUrl: 'https://api.openai.com/v1/completions',
redirects: [],
[Symbol(kCapture)]: false,
[Symbol(kHeaders)]: [Object],
[Symbol(kHeadersCount)]: 22,
[Symbol(kTrailers)]: null,
[Symbol(kTrailersCount)]: 0
},
aborted: false,
timeoutCb: null,
upgradeOrConnect: false,
parser: null,
maxHeadersCount: null,
reusedSocket: false,
host: 'api.openai.com',
protocol: 'https:',
_redirectable: Writable {
_writableState: [WritableState],
_events: [Object: null prototype],
_eventsCount: 3,
_maxListeners: undefined,
_options: [Object],
_ended: true,
_ending: true,
_redirectCount: 0,
_redirects: [],
_requestBodyLength: 130,
_requestBodyBuffers: [],
_onNativeResponse: [Function (anonymous)],
_currentRequest: [Circular *1],
_currentUrl: 'https://api.openai.com/v1/completions',
[Symbol(kCapture)]: false
},
[Symbol(kCapture)]: false,
[Symbol(kBytesWritten)]: 0,
[Symbol(kNeedDrain)]: false,
[Symbol(corked)]: 0,
[Symbol(kOutHeaders)]: [Object: null prototype] {
accept: [Array],
'content-type': [Array],
'user-agent': [Array],
authorization: [Array],
'content-length': [Array],
host: [Array]
},
[Symbol(errored)]: null,
[Symbol(kUniqueHeaders)]: null
},
response: {
status: 429,
statusText: 'Too Many Requests',
headers: {
date: 'Mon, 31 Jul 2023 08:47:25 GMT',
'content-type': 'application/json; charset=utf-8',
'content-length': '222',
connection: 'close',
vary: 'Origin',
'x-request-id': 'e8c4010eede0d3028c80108d480989a0',
'strict-transport-security': 'max-age=15724800; includeSubDomains',
'cf-cache-status': 'DYNAMIC',
server: 'cloudflare',
'cf-ray': '7ef49396dd0b41b6-BOM',
'alt-svc': 'h3=":443"; ma=86400'
},
config: {
transitional: [Object],
adapter: [Function: httpAdapter],
transformRequest: [Array],
transformResponse: [Array],
timeout: 0,
xsrfCookieName: 'XSRF-TOKEN',
xsrfHeaderName: 'X-XSRF-TOKEN',
maxContentLength: -1,
maxBodyLength: -1,
validateStatus: [Function: validateStatus],
headers: [Object],
method: 'post',
data: '{"model":"text-davinci-003","prompt":"hh","temperature":0,"max_tokens":300,"top_p":1,"frequency_penalty":0.5,"presence_penalty":0}',
url: 'https://api.openai.com/v1/completions'
},
request: <ref *1> ClientRequest {
_events: [Object: null prototype],
_eventsCount: 7,
_maxListeners: undefined,
outputData: [],
outputSize: 0,
writable: true,
destroyed: false,
_last: true,
chunkedEncoding: false,
shouldKeepAlive: false,
maxRequestsOnConnectionReached: false,
_defaultKeepAlive: true,
useChunkedEncodingByDefault: true,
sendDate: false,
_removedConnection: false,
_removedContLen: false,
_removedTE: false,
strictContentLength: false,
_contentLength: 130,
_hasBody: true,
_trailer: '',
finished: true,
_headerSent: true,
_closed: false,
socket: [TLSSocket],
_header: 'POST /v1/completions HTTP/1.1\r\n' +
'Accept: application/json, text/plain, */*\r\n' +
'Content-Type: application/json\r\n' +
'User-Agent: OpenAI/NodeJS/3.2.1\r\n' +
'Authorization: Bearer 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\r\n' +
'Content-Length: 130\r\n' +
'Host: api.openai.com\r\n' +
'Connection: close\r\n' +
'\r\n',
_keepAliveTimeout: 0,
_onPendingData: [Function: nop],
agent: [Agent],
socketPath: undefined,
method: 'POST',
maxHeaderSize: undefined,
insecureHTTPParser: undefined,
joinDuplicateHeaders: undefined,
path: '/v1/completions',
_ended: true,
res: [IncomingMessage],
aborted: false,
timeoutCb: null,
upgradeOrConnect: false,
parser: null,
maxHeadersCount: null,
reusedSocket: false,
host: 'api.openai.com',
protocol: 'https:',
_redirectable: [Writable],
[Symbol(kCapture)]: false,
[Symbol(kBytesWritten)]: 0,
[Symbol(kNeedDrain)]: false,
[Symbol(corked)]: 0,
[Symbol(kOutHeaders)]: [Object: null prototype],
[Symbol(errored)]: null,
[Symbol(kUniqueHeaders)]: null
},
data: { error: [Object] }
},
isAxiosError: true,
toJSON: [Function: toJSON]
}
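For context, the frontend request looks roughly like this (a simplified sketch; the real component code is omitted and the endpoint URL is assumed):

// Hypothetical frontend call; the actual component code is not shown in the question.
const res = await fetch("http://localhost:5001/", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({ data: "my question for chatGPT" }),
});
const { bot } = await res.json();
console.log(bot);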
1 Answer
Cause
This error means you have hit the rate limit assigned to your API key: you submitted too many tokens or requests in a short period and exceeded the number of requests allowed. My guess is that you are on a free plan, which has a low rate limit.
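If you want the backend to report this case explicitly instead of a generic 500, you could check the status inside the existing catch block. A minimal sketch against the axios-style error shown above (error.response.status, error.response.data.error):

} catch (error) {
  // The openai v3 SDK surfaces axios errors, so the HTTP status is on error.response.
  if (error.response && error.response.status === 429) {
    return res.status(429).send({
      message: "OpenAI rate limit reached - wait and retry, or upgrade your plan.",
      details: error.response.data.error, // error body returned by the API
    });
  }
  res.status(500).send({ message: error.message });
}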
Solution
If you are on a free or low-tier plan, consider upgrading to a pay-as-you-go plan, which offers higher rate limits.
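If you stay on a free plan, you can also soften occasional 429s by retrying with exponential backoff instead of failing immediately. A sketch (not part of the original code; the retry count and delays are arbitrary choices):

// Hypothetical helper: retry createCompletion when the API answers 429 (rate limited).
async function completionWithRetry(openai, request, retries = 3, delayMs = 1000) {
  for (let attempt = 0; attempt <= retries; attempt++) {
    try {
      return await openai.createCompletion(request);
    } catch (error) {
      const status = error.response && error.response.status;
      if (status !== 429 || attempt === retries) throw error;
      // Wait before retrying, doubling the delay on each attempt.
      await new Promise((resolve) => setTimeout(resolve, delayMs * 2 ** attempt));
    }
  }
}

You would then call completionWithRetry(openai, { model: "text-davinci-003", prompt: data.data, ... }) in place of openai.createCompletion(...) in the route handler.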