stream-to-client-express.ts
#!/usr/bin/env -S npm run tsn -T

// This file demonstrates how to stream chunks from the server as a
// newline-separated, JSON-encoded stream.

import OpenAI from 'openai';
import express, { Request, Response } from 'express';

const openai = new OpenAI();
const app = express();

// Parse text/plain bodies so req.body is the raw prompt string.
app.use(express.text());
// This endpoint can be called with:
//
//   curl 127.0.0.1:3000 -N -X POST -H 'Content-Type: text/plain' \
//     --data 'Can you explain why dogs are better than cats?'
//
// Or consumed with fetch (fromReadableStream takes the response body stream):
//
//   fetch('http://localhost:3000', {
//     method: 'POST',
//     body: 'Tell me why dogs are better than cats',
//   }).then(async (res) => {
//     const runner = ChatCompletionStreamingRunner.fromReadableStream(res.body);
//   });
//
// See examples/stream-to-client-browser.ts for a more complete example.
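//
// Without the SDK helper, the stream can also be parsed by hand. A minimal
// sketch, relying only on the fact that each line is one JSON-encoded
// ChatCompletionChunk (which is what toReadableStream() below produces):
//
//   const res = await fetch('http://localhost:3000', {
//     method: 'POST',
//     body: 'Tell me why dogs are better than cats',
//   });
//   const reader = res.body.getReader();
//   const decoder = new TextDecoder();
//   let buffered = '';
//   while (true) {
//     const { done, value } = await reader.read();
//     if (done) break;
//     buffered += decoder.decode(value, { stream: true });
//     const lines = buffered.split('\n');
//     buffered = lines.pop(); // keep any partial trailing line
//     for (const line of lines) {
//       if (!line) continue;
//       const chunk = JSON.parse(line);
//       process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
//     }
//   }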
app.post('/', async (req: Request, res: Response) => {
  try {
    console.log('Received request:', req.body);

    const stream = openai.beta.chat.completions.stream({
      model: 'gpt-3.5-turbo',
      stream: true,
      messages: [{ role: 'user', content: req.body }],
    });

    res.header('Content-Type', 'text/plain');
    // Forward each newline-terminated, JSON-encoded chunk to the client as-is.
    for await (const chunk of stream.toReadableStream()) {
      res.write(chunk);
    }

    res.end();
  } catch (e) {
    console.error(e);
    // End the response on error so the client isn't left waiting forever.
    if (!res.headersSent) res.status(500);
    res.end();
  }
});
app.listen(3000, () => {
  console.log('Started proxy express server');
});
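
// A fuller sketch of the client side in Node, assuming the server above is
// running on port 3000. The import path below matches the one used by
// examples/stream-to-client-browser.ts; the runner re-assembles the chunks
// and emits 'content' events as deltas arrive:
//
//   import { ChatCompletionStreamingRunner } from 'openai/lib/ChatCompletionStreamingRunner';
//
//   const res = await fetch('http://localhost:3000', {
//     method: 'POST',
//     headers: { 'Content-Type': 'text/plain' },
//     body: 'Tell me why dogs are better than cats',
//   });
//   const runner = ChatCompletionStreamingRunner.fromReadableStream(res.body!);
//   runner.on('content', (delta) => process.stdout.write(delta));
//   console.log('\nFinal content:', await runner.finalContent());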