index.ts (forked from xiangsx/gpt4free-ts)

import Koa, {Context, Next} from 'koa';
import Router from 'koa-router';
import bodyParser from 'koa-bodyparser';
import {ChatModelFactory, Site} from "./model";
import dotenv from 'dotenv';
import {ChatRequest, ChatResponse, ModelType, PromptToString} from "./model/base";
import {Event, EventStream} from "./utils";
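// Load variables from .env into process.env before anything else reads the config.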
dotenv.config();
const app = new Koa();
const router = new Router();
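
// Catch errors thrown by downstream middleware and routes, log them, and return the message to the client.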
const errorHandler = async (ctx: Context, next: Next) => {
  try {
    await next();
  } catch (err: any) {
    console.error(err);
    // Error instances serialize to "{}" with JSON.stringify, so surface the message explicitly.
    ctx.body = JSON.stringify({error: err.message || String(err)});
    ctx.res.end();
  }
};
app.use(errorHandler);
app.use(bodyParser());
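// Factory that maps each supported Site to its chat implementation (see ./model).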
const chatModel = new ChatModelFactory();
interface AskReq extends ChatRequest {
  site: Site;
}

interface AskRes extends ChatResponse {
}
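
// GET /ask?prompt=...&model=...&site=...
// One-shot endpoint: validates the query, runs the prompt through PromptToString with the
// model's token limit, and returns the complete answer as a single JSON response.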
router.get('/ask', async (ctx) => {
  const {prompt, model = ModelType.GPT3p5Turbo, site = Site.You} = ctx.query as unknown as AskReq;
  if (!prompt) {
    ctx.body = {error: `need prompt in query`} as AskRes;
    return;
  }
  const chat = chatModel.get(site);
  if (!chat) {
    ctx.body = {error: `unsupported site: ${site}`} as AskRes;
    return;
  }
  const tokenLimit = chat.support(model);
  if (!tokenLimit) {
    ctx.body = {error: `${site} does not support model ${model}`} as AskRes;
    return;
  }
  ctx.body = await chat.ask({prompt: PromptToString(prompt, tokenLimit), model});
});
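
// GET /ask/stream?prompt=...&model=...&site=...
// Streaming variant: same validation, but the answer is pushed incrementally over Server-Sent Events.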
router.get('/ask/stream', async (ctx) => {
  const {prompt, model = ModelType.GPT3p5Turbo, site = Site.You} = ctx.query as unknown as AskReq;
  ctx.set({
    "Content-Type": "text/event-stream;charset=utf-8",
    "Cache-Control": "no-cache",
    "Connection": "keep-alive",
  });
  const es = new EventStream();
  ctx.body = es.stream();
  if (!prompt) {
    es.write(Event.error, {error: 'need prompt in query'});
    es.end();
    return;
  }
  const chat = chatModel.get(site);
  if (!chat) {
    es.write(Event.error, {error: `unsupported site: ${site}`});
    es.end();
    return;
  }
  const tokenLimit = chat.support(model);
  if (!tokenLimit) {
    es.write(Event.error, {error: `${site} does not support model ${model}`});
    es.end();
    return;
  }
  await chat.askStream({prompt: PromptToString(prompt, tokenLimit), model}, es);
});
app.use(router.routes());
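
// Start the HTTP server on port 3000 and close it gracefully on SIGINT.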
(async () => {
  const server = app.listen(3000, () => {
    console.log("Now listening: 127.0.0.1:3000");
  });
  process.on('SIGINT', () => {
    server.close(() => {
      process.exit(0);
    });
  });
})();
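
// Example requests (model and site fall back to ModelType.GPT3p5Turbo and Site.You):
//   GET http://127.0.0.1:3000/ask?prompt=hello
//   GET http://127.0.0.1:3000/ask/stream?prompt=hello   (served as Server-Sent Events)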