Skip to content

Commit

Permalink
feat(api): add completion api
Browse files Browse the repository at this point in the history
  • Loading branch information
zhengxs2018 committed Dec 7, 2023
1 parent c5791a7 commit e67862e
Show file tree
Hide file tree
Showing 9 changed files with 248 additions and 70 deletions.
47 changes: 47 additions & 0 deletions web/studio/app/api/completion/minimax/ai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import OpenAI from 'openai';
import { MinimaxAI } from '@studio-b3/llmapi';

// See https://api.minimax.chat/user-center/basic-information/interface-key
// Module-level Minimax client shared by askAI below.
// NOTE(review): env vars are read once at module load; the SDK's behavior
// when MINIMAX_API_KEY is unset is not visible here — confirm it fails fast.
const api = new MinimaxAI({
  orgId: process.env.MINIMAX_API_ORG,
  apiKey: process.env.MINIMAX_API_KEY,
});

// Options accepted by askAI. A subset of OpenAI-style chat-completion
// parameters; fields not destructured by askAI are forwarded verbatim.
export type AskParams = {
  model?: MinimaxAI.ChatModel;
  prompt: string; // becomes the user turn
  system?: string; // optional system instruction, prepended when present
  temperature?: number;
  presence_penalty?: number;
  max_tokens?: number;
};

/**
 * Issue a streaming chat completion against the Minimax API.
 *
 * The prompt becomes the user turn; an optional system instruction is
 * prepended when present. All other options pass through to the SDK.
 *
 * @returns the SDK's streaming completion object.
 */
export function askAI({
  // NOTE(review): restored to 'abab5.5-chat' — the pre-refactor route used
  // that model; 'abab5-chat' here looked like a typo introduced by the move.
  model = 'abab5.5-chat',
  prompt,
  system,
  temperature = 0.6,
  ...rest
}: AskParams) {
  const messages: OpenAI.ChatCompletionMessageParam[] = [
    {
      role: 'user',
      content: prompt,
    },
  ];

  // The system instruction must precede the user turn.
  if (system) {
    messages.unshift({
      role: 'system',
      content: system,
    });
  }

  return api.chat.completions.create({
    ...rest,
    stream: true,
    model,
    temperature,
    messages,
  });
}
22 changes: 2 additions & 20 deletions web/studio/app/api/completion/minimax/route.ts
Original file line number Diff line number Diff line change
@@ -1,29 +1,11 @@
import { OpenAIStream, StreamingTextResponse } from 'ai';

import { MinimaxAI } from '@studio-b3/llmapi';

// See https://api.minimax.chat/user-center/basic-information/interface-key
const api = new MinimaxAI({
orgId: process.env.MINIMAX_API_ORG,
apiKey: process.env.MINIMAX_API_KEY,
});
import { askAI } from './ai';

// export const runtime = 'edge';

export async function POST(req: Request) {
const { prompt } = await req.json();

const response = await api.chat.completions.create({
model: 'abab5.5-chat',
stream: true,
temperature: 0.6,
messages: [
{
role: 'user',
content: prompt,
},
],
});
const response = await askAI(await req.json());

// Convert the response into a friendly text-stream
const stream = OpenAIStream(response);
Expand Down
45 changes: 45 additions & 0 deletions web/studio/app/api/completion/openai/ai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,45 @@
import OpenAI from 'openai';

// Create an OpenAI API client (that's edge friendly!)
const api = new OpenAI({
  // NOTE(review): defaulting to '' defers a missing-key failure to the
  // first request instead of module load — confirm that is intentional.
  apiKey: process.env.OPENAI_API_KEY || '',
});

// Options accepted by askAI. A subset of OpenAI-style chat-completion
// parameters; fields not destructured by askAI are forwarded verbatim.
export type AskParams = {
  model?: OpenAI.ChatCompletionCreateParams['model'];
  prompt: string; // becomes the user turn
  system?: string; // optional system instruction, prepended when present
  temperature?: number;
  presence_penalty?: number;
  max_tokens?: number;
};

/**
 * Issue a streaming chat completion to OpenAI.
 *
 * The prompt becomes the user turn; an optional system instruction is
 * prepended when present. All other options pass through to the SDK.
 *
 * @returns the SDK's streaming completion object.
 */
export function askAI({
  model = 'gpt-3.5-turbo',
  prompt,
  system,
  temperature = 0.6,
  ...rest
}: AskParams) {
  const userTurn: OpenAI.ChatCompletionMessageParam = {
    role: 'user',
    content: prompt,
  };

  // System instruction, when present, leads the conversation.
  const messages: OpenAI.ChatCompletionMessageParam[] = system
    ? [{ role: 'system', content: system }, userTurn]
    : [userTurn];

  return api.chat.completions.create({
    ...rest,
    stream: true,
    model,
    temperature,
    messages,
  });
}
19 changes: 3 additions & 16 deletions web/studio/app/api/completion/openai/route.ts
Original file line number Diff line number Diff line change
@@ -1,10 +1,6 @@
import OpenAI from 'openai';
import { OpenAIStream, StreamingTextResponse } from 'ai';

// Create an OpenAI API client (that's edge friendly!)
const openai = new OpenAI({
apiKey: process.env.OPENAI_API_KEY || '',
});
import { askAI } from './ai'

// Set the runtime to edge for best performance
// export const runtime = 'edge';
Expand All @@ -13,17 +9,8 @@ export async function POST(req: Request) {
const { prompt } = await req.json();

// Ask OpenAI for a streaming completion given the prompt
const response = await openai.chat.completions.create({
model: 'gpt-3.5-turbo',
stream: true,
temperature: 0.6,
max_tokens: 1200,
messages: [
{
role: 'user',
content: prompt,
},
]
const response = await askAI({
prompt
});
// Convert the response into a friendly text-stream
const stream = OpenAIStream(response);
Expand Down
48 changes: 48 additions & 0 deletions web/studio/app/api/completion/qwen/ai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,48 @@
import OpenAI from 'openai';
import { QWenAI } from '@studio-b3/llmapi';

// Module-level QWen (DashScope) client shared by askAI below.
const api = new QWenAI({
  // https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key
  // NOTE(review): defaulting to '' defers a missing-key failure to the
  // first request instead of module load — confirm that is intentional.
  apiKey: process.env.QWEN_API_KEY || '',
});

// Options accepted by askAI. A subset of OpenAI-style chat-completion
// parameters; fields not destructured by askAI are forwarded verbatim.
export type AskParams = {
  model?: QWenAI.ChatModel;
  prompt: string; // becomes the user turn
  system?: string; // optional system instruction, prepended when present
  temperature?: number;
  presence_penalty?: number;
  max_tokens?: number;
};

/**
 * Issue a streaming chat completion against the QWen API.
 *
 * The prompt becomes the user turn; an optional system instruction is
 * prepended when present. All other options (max_tokens,
 * presence_penalty, …) ride along via the rest spread.
 *
 * @returns the SDK's streaming completion object.
 */
export function askAI({
  model = 'qwen-max',
  prompt,
  system,
  // NOTE(review): restored to 0.6 — the pre-refactor qwen route and all
  // sibling providers (openai/minimax/yiyan) default to 0.6; the 0.9 here
  // looked unintentional. Confirm if a hotter default was desired.
  temperature = 0.6,
  ...rest
}: AskParams) {
  const messages: OpenAI.ChatCompletionMessageParam[] = [
    {
      role: 'user',
      content: prompt,
    },
  ];

  // The system instruction must precede the user turn.
  if (system) {
    messages.unshift({
      role: 'system',
      content: system,
    });
  }

  return api.chat.completions.create({
    ...rest,
    stream: true,
    model,
    temperature,
    messages,
  });
}
19 changes: 3 additions & 16 deletions web/studio/app/api/completion/qwen/route.ts
Original file line number Diff line number Diff line change
@@ -1,27 +1,14 @@
import { OpenAIStream, StreamingTextResponse } from "ai";

import { QWenAI } from "@studio-b3/llmapi";

const api = new QWenAI({
// https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key
apiKey: process.env.QWEN_API_KEY || "",
});
import { askAI } from './ai'

// export const runtime = 'edge';

export async function POST(req: Request) {
const { prompt } = await req.json();

const response = await api.chat.completions.create({
model: "qwen-max",
stream: true,
temperature: 0.6,
messages: [
{
role: "user",
content: prompt,
},
],
const response = await askAI({
prompt
});

// Convert the response into a friendly text-stream
Expand Down
50 changes: 50 additions & 0 deletions web/studio/app/api/completion/route.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,50 @@
import { OpenAIStream, StreamingTextResponse } from 'ai';

import * as minimax from './minimax/ai';
import * as openai from './openai/ai';
import * as qwen from './qwen/ai';
import * as yiyan from './yiyan/ai';

// Union of every provider's option shape; the concrete provider is
// selected at runtime from the model name (see the handlers table).
export type AskParams =
  | minimax.AskParams
  | openai.AskParams
  | qwen.AskParams
  | yiyan.AskParams;

// Model-name pattern → provider dispatch table. First match wins.
const handlers = [
  {
    match: /abab/, // Minimax 'abab*' models
    handle: minimax.askAI,
  },
  {
    match: /qwen/, // Alibaba DashScope 'qwen*' models
    handle: qwen.askAI,
  },
  {
    match: /ernie/, // Baidu 'ernie*' models
    handle: yiyan.askAI,
  },
];

// OpenAI serves unmatched models and requests that omit a model.
const fallback = openai.askAI;

/**
 * Dispatch a completion request to the provider whose model-name pattern
 * matches; fall back to OpenAI when no model is given or nothing matches.
 */
function askAI(params: AskParams) {
  const { model } = params;

  if (!model) return fallback(params);

  const handle =
    handlers.find(({ match }) => match.test(model))?.handle ?? fallback;

  // params is a union across providers while each handler only accepts its
  // own provider's shape; runtime dispatch guarantees the pairing.
  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
  // @ts-expect-error
  return handle(params);
}

/**
 * POST handler: parse the JSON body, dispatch it to a provider via askAI,
 * and stream the completion back to the client as plain text.
 */
export async function POST(req: Request) {
  const params = await req.json();
  const completion = await askAI(params);

  // Adapt the SDK's streaming response into a client-friendly text stream.
  return new StreamingTextResponse(OpenAIStream(completion));
}
47 changes: 47 additions & 0 deletions web/studio/app/api/completion/yiyan/ai.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
import OpenAI from 'openai';
import { ErnieAI } from '@studio-b3/llmapi';

// Module-level Ernie (Baidu) client shared by askAI below.
const api = new ErnieAI({
  // The access token authenticates the AI Studio user programmatically.
  // https://aistudio.baidu.com/index/accessToken
  // NOTE(review): defaulting to '' defers a missing-token failure to the
  // first request instead of module load — confirm that is intentional.
  token: process.env.AISTUDIO_ACCESS_TOKEN || '',
});

// Options accepted by askAI. A subset of OpenAI-style chat-completion
// parameters; fields not destructured by askAI are forwarded verbatim.
export type AskParams = {
  model?: ErnieAI.ChatModel;
  prompt: string; // becomes the user turn
  system?: string; // optional system instruction, prepended when present
  temperature?: number;
  presence_penalty?: number;
  max_tokens?: number;
};

/**
 * Issue a streaming chat completion against the Ernie (Yiyan) API.
 *
 * The prompt becomes the user turn; an optional system instruction is
 * prepended when present. All other options pass through to the SDK.
 *
 * @returns the SDK's streaming completion object.
 */
export function askAI({
  model = 'ernie-bot',
  prompt,
  system,
  temperature = 0.6,
  ...rest
}: AskParams) {
  const messages: OpenAI.ChatCompletionMessageParam[] = [];

  // Build the conversation in order: system instruction first (if any),
  // then the user turn.
  if (system) {
    messages.push({ role: 'system', content: system });
  }
  messages.push({ role: 'user', content: prompt });

  return api.chat.completions.create({
    ...rest,
    stream: true,
    model,
    temperature,
    messages,
  });
}
21 changes: 3 additions & 18 deletions web/studio/app/api/completion/yiyan/route.ts
Original file line number Diff line number Diff line change
@@ -1,29 +1,14 @@
import { OpenAIStream, StreamingTextResponse } from 'ai';

import { ErnieAI } from '@studio-b3/llmapi';

const api = new ErnieAI({
// 访问令牌通过编程对 AI Studio ⽤户进⾏身份验证
// https://aistudio.baidu.com/index/accessToken
token: process.env.AISTUDIO_ACCESS_TOKEN || '',
});
import { askAI } from './ai'

// export const runtime = 'edge';

export async function POST(req: Request) {
const { prompt } = await req.json();

const response = await api.chat.completions.create({
model: 'ernie-bot',
stream: true,
temperature: 0.6,
max_tokens: 1000,
messages: [
{
role: 'user',
content: prompt,
},
],
const response = await askAI({
prompt
});

const stream = OpenAIStream(response);
Expand Down

0 comments on commit e67862e

Please sign in to comment.