From e67862e230d54f2006e8c47421abb9d27cd0afa8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E9=98=BF=E6=A3=AE?=
Date: Thu, 7 Dec 2023 20:02:03 +0800
Subject: [PATCH] feat(api): add completion api

---
 web/studio/app/api/completion/minimax/ai.ts    | 47 +++++++++++++++++
 .../app/api/completion/minimax/route.ts        | 22 +-------
 web/studio/app/api/completion/openai/ai.ts     | 45 +++++++++++++++++
 web/studio/app/api/completion/openai/route.ts  | 19 ++-----
 web/studio/app/api/completion/qwen/ai.ts       | 48 ++++++++++++++++++
 web/studio/app/api/completion/qwen/route.ts    | 19 ++-----
 web/studio/app/api/completion/route.ts         | 50 +++++++++++++++++++
 web/studio/app/api/completion/yiyan/ai.ts      | 47 +++++++++++++++++
 web/studio/app/api/completion/yiyan/route.ts   | 21 ++------
 9 files changed, 248 insertions(+), 70 deletions(-)
 create mode 100644 web/studio/app/api/completion/minimax/ai.ts
 create mode 100644 web/studio/app/api/completion/openai/ai.ts
 create mode 100644 web/studio/app/api/completion/qwen/ai.ts
 create mode 100644 web/studio/app/api/completion/route.ts
 create mode 100644 web/studio/app/api/completion/yiyan/ai.ts

diff --git a/web/studio/app/api/completion/minimax/ai.ts b/web/studio/app/api/completion/minimax/ai.ts
new file mode 100644
index 0000000..5f5e07d
--- /dev/null
+++ b/web/studio/app/api/completion/minimax/ai.ts
@@ -0,0 +1,47 @@
+import OpenAI from 'openai';
+import { MinimaxAI } from '@studio-b3/llmapi';
+
+// See https://api.minimax.chat/user-center/basic-information/interface-key
+const api = new MinimaxAI({
+  orgId: process.env.MINIMAX_API_ORG,
+  apiKey: process.env.MINIMAX_API_KEY,
+});
+
+export type AskParams = {
+  model?: MinimaxAI.ChatModel;
+  prompt: string;
+  system?: string;
+  temperature?: number;
+  presence_penalty?: number;
+  max_tokens?: number;
+};
+
+export function askAI({
+  model = 'abab5.5-chat',
+  prompt,
+  system,
+  temperature = 0.6,
+  ...rest
+}: AskParams) {
+  const messages: OpenAI.ChatCompletionMessageParam[] = [
+    {
+      role: 'user',
+      content: prompt,
+    },
+  ];
+
+  if (system) {
+    messages.unshift({
+      role: 'system',
+      content: system,
+    });
+  }
+
+  return api.chat.completions.create({
+    ...rest,
+    stream: true,
+    model,
+    temperature,
+    messages,
+  });
+}
diff --git a/web/studio/app/api/completion/minimax/route.ts b/web/studio/app/api/completion/minimax/route.ts
index b11b02b..6a9c226 100644
--- a/web/studio/app/api/completion/minimax/route.ts
+++ b/web/studio/app/api/completion/minimax/route.ts
@@ -1,29 +1,11 @@
 import { OpenAIStream, StreamingTextResponse } from 'ai';
-import { MinimaxAI } from '@studio-b3/llmapi';
 
-// See https://api.minimax.chat/user-center/basic-information/interface-key
-const api = new MinimaxAI({
-  orgId: process.env.MINIMAX_API_ORG,
-  apiKey: process.env.MINIMAX_API_KEY,
-});
+import { askAI } from './ai';
 
 // export const runtime = 'edge';
 
 export async function POST(req: Request) {
-  const { prompt } = await req.json();
-
-  const response = await api.chat.completions.create({
-    model: 'abab5.5-chat',
-    stream: true,
-    temperature: 0.6,
-    messages: [
-      {
-        role: 'user',
-        content: prompt,
-      },
-    ],
-  });
+  const response = await askAI(await req.json());
 
   // Convert the response into a friendly text-stream
   const stream = OpenAIStream(response);
diff --git a/web/studio/app/api/completion/openai/ai.ts b/web/studio/app/api/completion/openai/ai.ts
new file mode 100644
index 0000000..bb6026a
--- /dev/null
+++ b/web/studio/app/api/completion/openai/ai.ts
@@ -0,0 +1,45 @@
+import OpenAI from 'openai';
+
+// Create an OpenAI API client (that's edge friendly!)
+const api = new OpenAI({
+  apiKey: process.env.OPENAI_API_KEY || '',
+});
+
+export type AskParams = {
+  model?: OpenAI.ChatCompletionCreateParams['model'];
+  prompt: string;
+  system?: string;
+  temperature?: number;
+  presence_penalty?: number;
+  max_tokens?: number;
+};
+
+export function askAI({
+  model = 'gpt-3.5-turbo',
+  prompt,
+  system,
+  temperature = 0.6,
+  ...rest
+}: AskParams) {
+  const messages: OpenAI.ChatCompletionMessageParam[] = [
+    {
+      role: 'user',
+      content: prompt,
+    },
+  ];
+
+  if (system) {
+    messages.unshift({
+      role: 'system',
+      content: system,
+    });
+  }
+
+  return api.chat.completions.create({
+    ...rest,
+    stream: true,
+    model,
+    temperature,
+    messages,
+  });
+}
diff --git a/web/studio/app/api/completion/openai/route.ts b/web/studio/app/api/completion/openai/route.ts
index cb4cded..ffac3bc 100644
--- a/web/studio/app/api/completion/openai/route.ts
+++ b/web/studio/app/api/completion/openai/route.ts
@@ -1,10 +1,6 @@
-import OpenAI from 'openai';
 import { OpenAIStream, StreamingTextResponse } from 'ai';
 
-// Create an OpenAI API client (that's edge friendly!)
-const openai = new OpenAI({
-  apiKey: process.env.OPENAI_API_KEY || '',
-});
+import { askAI } from './ai';
 
 // Set the runtime to edge for best performance
 // export const runtime = 'edge';
@@ -13,17 +9,8 @@ export async function POST(req: Request) {
   const { prompt } = await req.json();
 
   // Ask OpenAI for a streaming completion given the prompt
-  const response = await openai.chat.completions.create({
-    model: 'gpt-3.5-turbo',
-    stream: true,
-    temperature: 0.6,
-    max_tokens: 1200,
-    messages: [
-      {
-        role: 'user',
-        content: prompt,
-      },
-    ]
-  });
+  const response = await askAI({
+    prompt
+  });
   // Convert the response into a friendly text-stream
   const stream = OpenAIStream(response);
diff --git a/web/studio/app/api/completion/qwen/ai.ts b/web/studio/app/api/completion/qwen/ai.ts
new file mode 100644
index 0000000..436df6e
--- /dev/null
+++ b/web/studio/app/api/completion/qwen/ai.ts
@@ -0,0 +1,48 @@
+import OpenAI from 'openai';
+import { QWenAI } from '@studio-b3/llmapi';
+
+const api = new QWenAI({
+  // https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key
+  apiKey: process.env.QWEN_API_KEY || '',
+});
+
+export type AskParams = {
+  model?: QWenAI.ChatModel;
+  prompt: string;
+  system?: string;
+  temperature?: number;
+  presence_penalty?: number;
+  max_tokens?: number;
+};
+
+export function askAI({
+  model = 'qwen-max',
+  prompt,
+  system,
+  max_tokens,
+  temperature = 0.9,
+  ...rest
+}: AskParams) {
+  const messages: OpenAI.ChatCompletionMessageParam[] = [
+    {
+      role: 'user',
+      content: prompt,
+    },
+  ];
+
+  if (system) {
+    messages.unshift({
+      role: 'system',
+      content: system,
+    });
+  }
+
+  return api.chat.completions.create({
+    ...rest,
+    stream: true,
+    model,
+    temperature,
+    max_tokens,
+    messages,
+  });
+}
diff --git a/web/studio/app/api/completion/qwen/route.ts b/web/studio/app/api/completion/qwen/route.ts
index 5b708af..948ccfe 100644
--- a/web/studio/app/api/completion/qwen/route.ts
+++ b/web/studio/app/api/completion/qwen/route.ts
@@ -1,27 +1,14 @@
 import { OpenAIStream, StreamingTextResponse } from "ai";
-import { QWenAI } from "@studio-b3/llmapi";
 
-const api = new QWenAI({
-  // https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key
-  apiKey: process.env.QWEN_API_KEY || "",
-});
+import { askAI } from './ai';
 
 // export const runtime = 'edge';
 
 export async function POST(req: Request) {
   const { prompt } = await req.json();
 
-  const response = await api.chat.completions.create({
-    model: "qwen-max",
-    stream: true,
-    temperature: 0.6,
-    messages: [
-      {
-        role: "user",
-        content: prompt,
-      },
-    ],
+  const response = await askAI({
+    prompt
   });
 
   // Convert the response into a friendly text-stream
diff --git a/web/studio/app/api/completion/route.ts b/web/studio/app/api/completion/route.ts
new file mode 100644
index 0000000..8536464
--- /dev/null
+++ b/web/studio/app/api/completion/route.ts
@@ -0,0 +1,50 @@
+import { OpenAIStream, StreamingTextResponse } from 'ai';
+
+import * as minimax from './minimax/ai';
+import * as openai from './openai/ai';
+import * as qwen from './qwen/ai';
+import * as yiyan from './yiyan/ai';
+
+export type AskParams =
+  | minimax.AskParams
+  | openai.AskParams
+  | qwen.AskParams
+  | yiyan.AskParams;
+
+const handlers = [
+  {
+    match: /abab/,
+    handle: minimax.askAI,
+  },
+  {
+    match: /qwen/,
+    handle: qwen.askAI,
+  },
+  {
+    match: /ernie/,
+    handle: yiyan.askAI,
+  },
+];
+
+const fallback = openai.askAI;
+
+function askAI(params: AskParams) {
+  const model = params.model;
+
+  if (!model) return fallback(params);
+
+  const matches = handlers.find((h) => h.match.test(model));
+  const handle = matches?.handle || fallback;
+
+  // eslint-disable-next-line @typescript-eslint/ban-ts-comment
+  // @ts-expect-error
+  return handle(params);
+}
+
+export async function POST(req: Request) {
+  const response = await askAI(await req.json());
+
+  const stream = OpenAIStream(response);
+
+  return new StreamingTextResponse(stream);
+}
diff --git a/web/studio/app/api/completion/yiyan/ai.ts b/web/studio/app/api/completion/yiyan/ai.ts
new file mode 100644
index 0000000..7f1601b
--- /dev/null
+++ b/web/studio/app/api/completion/yiyan/ai.ts
@@ -0,0 +1,47 @@
+import OpenAI from 'openai';
+import { ErnieAI } from '@studio-b3/llmapi';
+
+const api = new ErnieAI({
+  // Access tokens authenticate AI Studio users programmatically
+  // https://aistudio.baidu.com/index/accessToken
+  token: process.env.AISTUDIO_ACCESS_TOKEN || '',
+});
+
+export type AskParams = {
+  model?: ErnieAI.ChatModel;
+  prompt: string;
+  system?: string;
+  temperature?: number;
+  presence_penalty?: number;
+  max_tokens?: number;
+};
+
+export function askAI({
+  model = 'ernie-bot',
+  prompt,
+  system,
+  temperature = 0.6,
+  ...rest
+}: AskParams) {
+  const messages: OpenAI.ChatCompletionMessageParam[] = [
+    {
+      role: 'user',
+      content: prompt,
+    },
+  ];
+
+  if (system) {
+    messages.unshift({
+      role: 'system',
+      content: system,
+    });
+  }
+
+  return api.chat.completions.create({
+    ...rest,
+    stream: true,
+    model,
+    temperature,
+    messages,
+  });
+}
diff --git a/web/studio/app/api/completion/yiyan/route.ts b/web/studio/app/api/completion/yiyan/route.ts
index 94923d9..ab1ec08 100644
--- a/web/studio/app/api/completion/yiyan/route.ts
+++ b/web/studio/app/api/completion/yiyan/route.ts
@@ -1,29 +1,14 @@
 import { OpenAIStream, StreamingTextResponse } from 'ai';
-import { ErnieAI } from '@studio-b3/llmapi';
 
-const api = new ErnieAI({
-  // Access tokens authenticate AI Studio users programmatically
-  // https://aistudio.baidu.com/index/accessToken
-  token: process.env.AISTUDIO_ACCESS_TOKEN || '',
-});
+import { askAI } from './ai';
 
 // export const runtime = 'edge';
 
 export async function POST(req: Request) {
   const { prompt } = await req.json();
 
-  const response = await api.chat.completions.create({
-    model: 'ernie-bot',
-    stream: true,
-    temperature: 0.6,
-    max_tokens: 1000,
-    messages: [
-      {
-        role: 'user',
-        content: prompt,
-      },
-    ],
+  const response = await askAI({
+    prompt
   });
 
   const stream = OpenAIStream(response);
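
Usage note (not part of the patch): the sketch below shows how a client might call the unified /api/completion route this change introduces. The endpoint path and the { prompt, model } payload shape follow the new web/studio/app/api/completion/route.ts above; the helper name streamCompletion and the fetch/ReadableStream plumbing are illustrative assumptions, not code from this repository.

// Hypothetical client-side helper: POSTs to the unified completion route,
// which dispatches on `model` (/abab/ -> minimax, /qwen/ -> qwen,
// /ernie/ -> yiyan) and falls back to the openai handler otherwise.
async function streamCompletion(prompt: string, model?: string): Promise<string> {
  const res = await fetch('/api/completion', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ prompt, model }),
  });

  if (!res.ok || !res.body) {
    throw new Error(`completion request failed: ${res.status}`);
  }

  // StreamingTextResponse wraps a plain text stream; accumulate chunks as they arrive.
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let text = '';

  while (true) {
    const { done, value } = await reader.read();
    if (done) break;
    text += decoder.decode(value, { stream: true });
  }

  return text;
}

// streamCompletion('Hello', 'qwen-max') is routed to qwen.askAI;
// streamCompletion('Hello') takes the openai fallback.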