diff --git a/.env.example b/.env.example index 280350c..7ac88c9 100644 --- a/.env.example +++ b/.env.example @@ -8,4 +8,13 @@ QWEN_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxx" # OpenAI # See https://platform.openai.com/account/api-keys -OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxx" \ No newline at end of file +OPENAI_API_KEY="sk-xxxxxxxxxxxxxxxxxxxxx" + +# Minimax +# See https://api.minimax.chat/user-center/basic-information/interface-key +MINIMAX_API_ORG="xxxxxxxx" +MINIMAX_API_KEY="eyJhxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" + +# Imagine Art +# see https://platform.imagine.art/dashboard +VYRO_API_KEY="vk-xxxxxxxxxxxxxxxxxxxxxxxxxxxxxx" diff --git a/.gitignore b/.gitignore index bafe82d..3c52e30 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,5 @@ -/node_modules +node_modules/ + .nx/installation .nx/cache .env diff --git a/.node-version b/.node-version new file mode 100644 index 0000000..8cbe185 --- /dev/null +++ b/.node-version @@ -0,0 +1 @@ +18.x.x \ No newline at end of file diff --git a/web/core/.eslintrc.cjs b/web/core/.eslintrc.cjs index b408e01..cd673bc 100644 --- a/web/core/.eslintrc.cjs +++ b/web/core/.eslintrc.cjs @@ -3,6 +3,10 @@ const { defineConfig } = require('eslint-define-config') module.exports = defineConfig({ root: true, + env: { + node: true, + browser: true + }, extends: [ 'eslint:recommended', ], diff --git a/web/llmapi/package.json b/web/llmapi/package.json new file mode 100644 index 0000000..d01840d --- /dev/null +++ b/web/llmapi/package.json @@ -0,0 +1,37 @@ +{ + "name": "@studio-b3/llmapi", + "version": "0.0.1", + "type": "module", + "main": "dist/index.mjs", + "types": "dist-types/index.d.ts", + "exports": { + ".": { + "import": "./dist/index.mjs" + }, + "./package.json": "./package.json" + }, + "typesVersions": { + "*": { + "*": [ + "./dist-types/index.d.ts", + "./dist-types/*" + ] + } + }, + "sideEffects": false, + "files": [ + "dist", + "dist-types", + "src" + ], + "scripts": { + "watch": "vite build --watch", + "build": "vite build", + "lint": "eslint 
. --ext ts,tsx,.cjs --report-unused-disable-directives --max-warnings 0", + "lint:fix": "eslint . --ext .ts,.cjs --fix --fix-type [problem,suggestion]" + }, + "dependencies": { + "openai": "^4.20.0" + }, + "devDependencies": {} +} \ No newline at end of file diff --git a/web/studio/app/api/completion/yiyan/erniebot.ts b/web/llmapi/src/ernie.ts similarity index 83% rename from web/studio/app/api/completion/yiyan/erniebot.ts rename to web/llmapi/src/ernie.ts index bb2722b..90b8e35 100644 --- a/web/studio/app/api/completion/yiyan/erniebot.ts +++ b/web/llmapi/src/ernie.ts @@ -1,8 +1,11 @@ -import OpenAI, { APIError, OpenAIError } from "openai"; -import { APIClient, type Fetch } from "openai/core"; -import { Stream } from "openai/streaming"; +import OpenAI, { APIError, OpenAIError } from 'openai'; +import { APIClient, type Fetch } from 'openai/core'; +import { Stream } from 'openai/streaming'; -export type ErnieAPIOptions = { +import { APIResource } from './resource'; +import { ensureArray } from './util'; + +export type ErnieAIOptions = { baseURL?: string; token?: string; timeout?: number | undefined; @@ -15,14 +18,14 @@ export type ErnieAPIOptions = { // 之前 AI Studio 的文档是有文档的,但现在不知道去哪了 // 参考: // - https://cloud.baidu.com/doc/WENXINWORKSHOP/s/jlil56u11 -// - https://github.com/PaddlePaddle/ERNIE-Bot-SDK/blob/develop/erniebot/backends/aistudio.py -export class ErnieAPI extends APIClient { +// - https://github.com/PaddlePaddle/ERNIE-Bot-SDK/blob/develop/ErnieAI/backends/aistudio.py +export class ErnieAI extends APIClient { protected token: string; - constructor(options?: ErnieAPIOptions) { + constructor(options?: ErnieAIOptions) { const { - token = process.env.AISTUDIO_ACCESS_TOKEN || "", - baseURL = "https://aistudio.baidu.com/llm/lmapi/v1", + token = process.env.AISTUDIO_ACCESS_TOKEN || '', + baseURL = 'https://aistudio.baidu.com/llm/lmapi/v1', timeout = 30000, fetch = globalThis.fetch, httpAgent = undefined, @@ -37,8 +40,6 @@ export class ErnieAPI extends APIClient { 
...rest, }); - // ok(token, "token is required"); - this.token = token; } @@ -55,14 +56,6 @@ export class ErnieAPI extends APIClient { } } -export class APIResource { - protected _client: APIClient; - - constructor(client: APIClient) { - this._client = client; - } -} - export class Chat extends APIResource { completions = new Completions(this._client); } @@ -72,38 +65,38 @@ export class Completions extends APIResource { // 使用模型名称是为了和 OpenAI 的 API 保持一致 // 同时也是为了方便使用 protected resources: Map< - ErnieBot.ChatModel, + ErnieAI.ChatModel, { - id: ErnieBot.ChatModel; + id: ErnieAI.ChatModel; endpoint: string; } > = new Map([ [ - "ernie-bot", + 'ernie-bot', { - id: "ernie-bot", - endpoint: "/chat/completions", + id: 'ernie-bot', + endpoint: '/chat/completions', }, ], [ - "ernie-bot-turbo", + 'ernie-bot-turbo', { - id: "ernie-bot-turbo", - endpoint: "/chat/eb-instant", + id: 'ernie-bot-turbo', + endpoint: '/chat/eb-instant', }, ], [ - "ernie-bot-4", + 'ernie-bot-4', { - id: "ernie-bot-4", - endpoint: "/chat/completions_pro", + id: 'ernie-bot-4', + endpoint: '/chat/completions_pro', }, ], [ - "ernie-bot-8k", + 'ernie-bot-8k', { - id: "ernie-bot-8k", - endpoint: "/chat/ernie_bot_8k", + id: 'ernie-bot-8k', + endpoint: '/chat/ernie_bot_8k', }, ], ]); @@ -127,7 +120,7 @@ export class Completions extends APIResource { OverrideOpenAIChatCompletionCreateParams, options?: OpenAI.RequestOptions ) { - const { model = "ernie-bot", ...body } = this.buildCreateParams(params); + const { model = 'ernie-bot', ...body } = this.buildCreateParams(params); const resource = this.resources.get(model); if (!resource) { @@ -139,7 +132,7 @@ export class Completions extends APIResource { const headers = { ...options?.headers, // Note: 如果是 stream 的话,需要设置 Accept 为 text/event-stream - Accept: stream ? "text/event-stream" : "application/json", + Accept: stream ? 
'text/event-stream' : 'application/json', }; const response: Response = await this._client.post(resource.endpoint, { @@ -155,7 +148,7 @@ export class Completions extends APIResource { if (stream) { const controller = new AbortController(); - options?.signal?.addEventListener("abort", () => { + options?.signal?.addEventListener('abort', () => { controller.abort(); }); @@ -172,7 +165,7 @@ export class Completions extends APIResource { protected buildCreateParams( params: OpenAI.ChatCompletionCreateParams & OverrideOpenAIChatCompletionCreateParams - ): ErnieBot.ChatCompletionCreateParams { + ): ErnieAI.ChatCompletionCreateParams { const { messages = [], presence_penalty, user, stop, ...rest } = params; const head = messages[0]; @@ -180,14 +173,14 @@ export class Completions extends APIResource { // 文心一言的 system 是独立字段 //(1)长度限制1024个字符 //(2)如果使用functions参数,不支持设定人设system - const system = head && head.role === "system" ? head.content : undefined; + const system = head && head.role === 'system' ? head.content : undefined; // 移除 system 角色的消息 if (system) { messages.splice(0, 1); } - const data: ErnieBot.ChatCompletionCreateParams = { + const data: ErnieAI.ChatCompletionCreateParams = { ...rest, system, messages, @@ -209,10 +202,6 @@ export class Completions extends APIResource { } } -function ensureArray(value: T | T[]): T[] { - return Array.isArray(value) ? 
value : [value]; -} - /** * 如果 code 不为 0,抛出 APIError * @@ -265,7 +254,7 @@ function makeAPIError(code: number, message: string) { */ function fromOpenAIStream( model: string, - stream: Stream, + stream: Stream, controller: AbortController ): Stream { async function* iterator(): AsyncIterator< @@ -282,7 +271,7 @@ function fromOpenAIStream( const choice: OpenAI.ChatCompletionChunk.Choice = { index: 0, delta: { - role: "assistant", + role: 'assistant', content: data.result || '', }, finish_reason: null, @@ -291,18 +280,18 @@ function fromOpenAIStream( // TODO 需要确认 is_truncated 是否和 is_end 互斥 // TODO 需要确认 functions 是否响应式不一样 if (data.is_end) { - choice.finish_reason = "stop"; + choice.finish_reason = 'stop'; } else if (data.is_truncated) { - choice.finish_reason = "length"; + choice.finish_reason = 'length'; } else if (data.need_clear_history) { - choice.finish_reason = "content_filter"; + choice.finish_reason = 'content_filter'; } yield { id: data.id, model, choices: [choice], - object: "chat.completion.chunk", + object: 'chat.completion.chunk', created: parseInt(data.created, 10), }; } @@ -317,7 +306,7 @@ function fromOpenAIStream( */ function fromResponse( model: string, - data: ErnieBot.APIResponse + data: ErnieAI.APIResponse ): OpenAI.ChatCompletion { const { errorCode, errorMsg, result } = data; @@ -326,20 +315,20 @@ function fromResponse( const choice: OpenAI.ChatCompletion.Choice = { index: 0, message: { - role: "assistant", + role: 'assistant', content: result.result, }, - finish_reason: "stop", + finish_reason: 'stop', }; // TODO 需要确认 is_truncated 是否和 is_end 互斥 // TODO 需要确认 functions 是否响应式不一样 if (result.is_end) { - choice.finish_reason = "stop"; + choice.finish_reason = 'stop'; } else if (result.is_truncated) { - choice.finish_reason = "length"; + choice.finish_reason = 'length'; } else if (result.need_clear_history) { - choice.finish_reason = "content_filter"; + choice.finish_reason = 'content_filter'; } return { @@ -347,20 +336,20 @@ function fromResponse( 
model: model, choices: [choice], created: parseInt(result.created, 10), - object: "chat.completion", + object: 'chat.completion', usage: result.usage, }; } // 用于覆盖 OpenAI.ChatCompletionCreateParams 的参数 type OverrideOpenAIChatCompletionCreateParams = { - model: ErnieBot.ChatModel; + model: ErnieAI.ChatModel; disable_search?: boolean | null; enable_citation?: boolean | null; }; // eslint-disable-next-line @typescript-eslint/no-namespace -export namespace ErnieBot { +export namespace ErnieAI { export type ChatModel = | 'ernie-bot' | 'ernie-bot-turbo' @@ -371,7 +360,7 @@ export namespace ErnieBot { /** * 模型名称 */ - model: ErnieBot.ChatModel; + model: ErnieAI.ChatModel; /** * 是否强制关闭实时搜索功能,默认 false,表示不关闭 @@ -478,3 +467,5 @@ export namespace ErnieBot { result: APIResult; }; } + +export default ErnieAI; diff --git a/web/llmapi/src/index.ts b/web/llmapi/src/index.ts new file mode 100644 index 0000000..2537c22 --- /dev/null +++ b/web/llmapi/src/index.ts @@ -0,0 +1,38 @@ +import OpenAI from 'openai'; + +import ErnieAI, { ErnieAIOptions } from './ernie'; +import MinimaxAI, { MinimaxAIOptions } from './minimax'; +import QWenAI, { QWenAIOptions } from './qwen'; +import VYroAI, { VYroAIOptions } from './vyro'; + +export { + ErnieAI, + type ErnieAIOptions, + MinimaxAI, + type MinimaxAIOptions, + OpenAI, + QWenAI, + type QWenAIOptions, + VYroAI, + type VYroAIOptions, +}; + +export { + OpenAIError, + APIError, + APIConnectionError, + APIConnectionTimeoutError, + APIUserAbortError, + NotFoundError, + ConflictError, + RateLimitError, + BadRequestError, + AuthenticationError, + InternalServerError, + PermissionDeniedError, + UnprocessableEntityError, +} from 'openai'; + +export * from './resource'; +export * from './streaming'; +export * from './util'; diff --git a/web/llmapi/src/minimax.ts b/web/llmapi/src/minimax.ts new file mode 100644 index 0000000..b3ecb3d --- /dev/null +++ b/web/llmapi/src/minimax.ts @@ -0,0 +1,584 @@ +import type { Agent } from 'node:http'; + +import OpenAI, { 
OpenAIError, APIError } from 'openai'; +import { + APIClient, + type DefaultQuery, + type Fetch, + type FinalRequestOptions, + type Headers, +} from 'openai/core'; +import { Stream } from 'openai/streaming'; + +import { APIResource } from './resource'; +import { iterMessages, SSEDecoder } from './streaming'; + +export interface MinimaxAIOptions { + baseURL?: string; + orgId?: string; + apiKey?: string; + timeout?: number | undefined; + httpAgent?: Agent; + fetch?: Fetch | undefined; + defaultHeaders?: Headers; + defaultQuery?: DefaultQuery; +} + +export class MinimaxAI extends APIClient { + protected orgId: string; + protected apiKey: string; + + private _options: MinimaxAIOptions; + + constructor(options: MinimaxAIOptions = {}) { + const { + orgId = process.env.MINIMAX_API_ORG || '', + apiKey = process.env.MINIMAX_API_KEY || '', + baseURL = 'https://api.minimax.chat/v1', + timeout = 30000, + fetch = globalThis.fetch, + httpAgent = undefined, + ...rest + } = options; + + super({ + baseURL, + timeout, + fetch, + httpAgent, + ...rest, + }); + + this._options = options; + + this.apiKey = apiKey; + this.orgId = orgId; + } + + chat = new Chat(this); + + protected authHeaders(): Headers { + return { + Authorization: `Bearer ${this.apiKey}`, + }; + } + + protected override defaultHeaders(opts: FinalRequestOptions): Headers { + return { + ...super.defaultHeaders(opts), + ...this._options.defaultHeaders, + }; + } + + protected override defaultQuery(): DefaultQuery | undefined { + return { + GroupId: this.orgId, + ...this._options.defaultQuery, + }; + } +} + +export class Chat extends APIResource { + completions = new Completions(this._client); +} + +export class Completions extends APIResource { + protected resources: Record< + MinimaxAI.ChatModel, + { + model: MinimaxAI.ChatModel; + endpoint: string; + } + > = { + 'abab5-chat': { + model: 'abab5-chat', + endpoint: '/text/chatcompletion', + }, + 'abab5.5-chat': { + model: 'abab5.5-chat', + endpoint: '/text/chatcompletion', 
+ }, + 'abab5.5-chat-pro': { + model: 'abab5.5-chat', + endpoint: '/text/chatcompletion_pro', + }, + }; + + protected system = + 'MM智能助理是一款由MiniMax自研的,没有调用其他产品的接口的大型语言模型。MiniMax是一家中国科技公司,一直致力于进行大模型相关的研究。'; + + /** + * Creates a model response for the given chat conversation. + * + * See https://api.minimax.chat/document/guides/chat-model/chat/api + */ + create( + body: MinimaxAI.ChatCompletionCreateParamsNonStreaming, + options?: OpenAI.RequestOptions + ): Promise; + create( + body: MinimaxAI.ChatCompletionCreateParamsStreaming, + options?: OpenAI.RequestOptions + ): Promise>; + + async create( + params: MinimaxAI.ChatCompletionCreateParams, + options?: OpenAI.RequestOptions + ) { + const resource = this.resources[params.model]; + + if (!resource) { + throw new OpenAIError(`Invalid model: ${params.model}`); + } + + const body = this.buildCreateParams(params); + + const response: Response = await this._client.post(resource.endpoint, { + ...options, + body: { ...body, model: resource.model }, + stream: false, + __binaryResponse: true, + }); + + if (body.stream) { + const controller = new AbortController(); + + options?.signal?.addEventListener('abort', () => { + controller.abort(); + }); + + return Completions.fromSSEResponse(params.model, response, controller); + } + + return Completions.fromResponse(params.model, await response.json()); + } + + protected buildCreateParams( + params: MinimaxAI.ChatCompletionCreateParams + ): ChatCompletionCreateParams { + const { model, messages = [], max_tokens, ...rest } = params; + + const data: ChatCompletionCreateParams = { + model, + messages: [], + ...rest, + }; + + if (max_tokens) { + data.tokens_to_generate = max_tokens; + } + + const head = messages[0]; + + // minimax 的 system 是独立字段 + const system = head && head.role === 'system' ? 
head.content : null; + + // 移除 system 角色的消息 + if (system) { + messages.splice(0, 1); + } + + if (model === 'abab5.5-chat-pro') { + data.bot_setting = [ + { + bot_name: 'MM智能助理', + content: system || this.system, + }, + ]; + data.reply_constraints = { + sender_type: 'BOT', + sender_name: 'MM智能助理', + }; + } else { + data.role_meta = { + bot_name: 'MM智能助理', + user_name: '用户', + }; + data.prompt = system || this.system; + } + + data.messages = messages.map((item) => { + switch (item.role) { + case 'assistant': + return { + sender_type: 'BOT', + text: item.content as string, + }; + default: { + const message: ChatMessage = { + sender_type: 'USER', + text: item.content as string, + }; + + if (model == 'abab5.5-chat-pro') { + message.sender_name = '用户'; + } + + return message; + } + } + }); + + if (params.stream) { + data['use_standard_sse'] = true; + } + + return data; + } + + static fromResponse( + model: MinimaxAI.ChatModel, + data: ChatCompletion + ): OpenAI.ChatCompletion { + Completions.assert(data); + + return { + id: data.id, + model: data.model, + choices: data.choices.map((choice, index) => { + const { finish_reason } = choice; + + if (model === 'abab5.5-chat-pro') { + return { + index: index, + message: { + role: 'assistant', + content: choice.messages[0].text, + }, + finish_reason, + }; + } + + return { + index: index, + message: { + role: 'assistant', + content: choice.text, + }, + finish_reason, + }; + }), + created: data.created, + object: 'chat.completion', + usage: data.usage, + }; + } + + static fromSSEResponse( + model: MinimaxAI.ChatModel, + response: Response, + controller: AbortController + ): Stream { + let consumed = false; + const decoder = new SSEDecoder(); + + function transform(data: ChatCompletionChunk): OpenAI.ChatCompletionChunk { + return { + id: data.request_id, + model: model, + choices: data.choices.map((choice, index) => { + const { finish_reason = null } = choice; + + if (model === 'abab5.5-chat-pro') { + const content = 
choice.messages[0].text; + + return { + index: index, + delta: { + role: 'assistant', + content: finish_reason === 'stop' ? '' : content, + }, + finish_reason: finish_reason, + }; + } + + return { + index: index, + delta: { + role: 'assistant', + content: choice.delta, + }, + finish_reason: finish_reason, + }; + }), + object: 'chat.completion.chunk', + created: data.created, + }; + } + + async function* iterator(): AsyncIterator< + OpenAI.ChatCompletionChunk, + any, + undefined + > { + if (consumed) { + throw new Error( + 'Cannot iterate over a consumed stream, use `.tee()` to split the stream.' + ); + } + consumed = true; + let done = false; + try { + for await (const sse of iterMessages(response, decoder, controller)) { + if (done) continue; + + if (sse.data.startsWith('[DONE]')) { + done = true; + continue; + } + + if (sse.event === null) { + let data; + + try { + data = JSON.parse(sse.data); + } catch (e) { + console.error(`Could not parse message into JSON:`, sse.data); + console.error(`From chunk:`, sse.raw); + throw e; + } + + if (data && data.code) { + throw new APIError(undefined, data, undefined, undefined); + } + + yield transform(data); + } + } + done = true; + } catch (e) { + // If the user calls `stream.controller.abort()`, we should exit without throwing. + if (e instanceof Error && e.name === 'AbortError') return; + throw e; + } finally { + // If the user `break`s, abort the ongoing request. 
+ if (!done) controller.abort(); + } + } + + return new Stream(iterator, controller); + } + + static assert(data: ChatCompletion) { + if (data.base_resp.status_code === 0) return; + + const error = { + code: data.base_resp.status_code, + message: data.base_resp.status_msg, + }; + + throw new APIError(undefined, error, undefined, undefined); + } +} + +type ChatMessage = { + sender_type: 'USER' | 'BOT' | 'FUNCTION'; + sender_name?: string; + text: string; +}; + +interface ChatCompletionCreateParams { + /** + * 模型名称 + */ + model: MinimaxAI.ChatModel; + + /** + * 对话背景、人物或功能设定 + * + * 和 bot_setting 互斥 + */ + prompt?: string | null; + + /** + * 对话 meta 信息 + * + * 和 bot_setting 互斥 + */ + role_meta?: { + /** + * 用户代称 + */ + user_name: string; + /** + * AI 代称 + */ + bot_name: string; + }; + + /** + * pro 模式下,可以设置 bot 的名称和内容 + * + * 和 prompt 互斥 + */ + bot_setting?: { + bot_name: string; + content: string; + }[]; + + /** + * pro 模式下,设置模型回复要求 + */ + reply_constraints?: { + sender_type: string; + sender_name: string; + }; + + /** + * 对话内容 + */ + messages: ChatMessage[]; + + /** + * 如果为 true,则表明设置当前请求为续写模式,回复内容为传入 messages 的最后一句话的续写; + * + * 此时最后一句发送者不限制 USER,也可以为 BOT。 + */ + continue_last_message?: boolean | null; + + /** + * 内容随机性 + */ + temperature?: number | null; + + /** + * 生成文本的多样性 + */ + top_p?: number | null; + + /** + * 最大生成token数,需要注意的是,这个参数并不会影响模型本身的生成效果, + * + * 而是仅仅通过以截断超出的 token 的方式来实现功能需要保证输入上文的 token 个数和这个值加一起小于 6144 或者 16384,否则请求会失败 + */ + tokens_to_generate?: number | null; + + /** + * 对输出中易涉及隐私问题的文本信息进行脱敏, + * + * 目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认 false,即开启脱敏 + */ + skip_info_mask?: boolean | null; + + /** + * 对输出中易涉及隐私问题的文本信息进行打码, + * + * 目前包括但不限于邮箱、域名、链接、证件号、家庭住址等,默认true,即开启打码 + */ + mask_sensitive_info?: boolean | null; + + /** + * 生成多少个结果;不设置默认为1,最大不超过4。 + * + * 由于 beam_width 生成多个结果,会消耗更多 token。 + */ + beam_width?: number | null; + + /** + * 是否以流式接口的形式返回数据,默认 false + */ + stream?: boolean | null; + + /** + * 是否使用标准 SSE 格式,设置为 true 时, + * 流式返回的结果将以两个换行为分隔符。 + 
* + * 只有在 stream=true 时,此参数才会生效。 + */ + use_standard_sse?: boolean | null; +} + +type ChatCompletionChoice = { + index?: number; + text: string; + messages: { + sender_type: 'BOT'; + sender_name: string; + text: string; + }[]; + finish_reason: + | 'stop' + | 'length' + | 'tool_calls' + | 'content_filter' + | 'function_call'; +}; + +interface ChatCompletion { + id: string; + created: number; + model: MinimaxAI.ChatModel; + reply: string; + choices: ChatCompletionChoice[]; + usage: { + /** + * Number of tokens in the generated completion. + */ + completion_tokens: number; + + /** + * Number of tokens in the prompt. + */ + prompt_tokens: number; + + /** + * Total number of tokens used in the request (prompt + completion). + */ + total_tokens: number; + }; + input_sensitive: boolean; + output_sensitive: boolean; + base_resp: { + status_code: number; + status_msg: string; + }; +} + +type ChatCompletionChunkChoice = { + index: number; + delta: string; + messages: { + sender_type: 'BOT'; + sender_name: string; + text: string; + }[]; + finish_reason: 'stop' | 'length' | 'content_filter' | 'function_call' | null; +}; + +interface ChatCompletionChunk { + request_id: string; + created: number; + model: MinimaxAI.ChatModel; + reply: string; + choices: ChatCompletionChunkChoice[]; + usage: { + total_tokens: number; + }; + input_sensitive: false; + output_sensitive: false; + base_resp: { + status_code: number; + status_msg: string; + }; +} + +// eslint-disable-next-line @typescript-eslint/no- +export namespace MinimaxAI { + export type ChatModel = 'abab5-chat' | 'abab5.5-chat' | 'abab5.5-chat-pro'; + + export interface ChatCompletionCreateParamsNonStreaming + extends OpenAI.ChatCompletionCreateParamsNonStreaming { + model: ChatModel; + } + + export interface ChatCompletionCreateParamsStreaming + extends OpenAI.ChatCompletionCreateParamsStreaming { + model: ChatModel; + } + + export type ChatCompletionCreateParams = + | ChatCompletionCreateParamsNonStreaming + | 
ChatCompletionCreateParamsStreaming; +} + +export default MinimaxAI; diff --git a/web/studio/app/api/completion/qwen/qwen.ts b/web/llmapi/src/qwen.ts similarity index 88% rename from web/studio/app/api/completion/qwen/qwen.ts rename to web/llmapi/src/qwen.ts index 0f4a534..d68434e 100644 --- a/web/studio/app/api/completion/qwen/qwen.ts +++ b/web/llmapi/src/qwen.ts @@ -1,6 +1,6 @@ -import OpenAI, { APIError, OpenAIError } from "openai"; -import { APIClient, type Fetch } from "openai/core"; -import { Stream } from "openai/streaming"; +import OpenAI, { APIError, OpenAIError } from 'openai'; +import { APIClient, type Fetch } from 'openai/core'; +import { Stream } from 'openai/streaming'; import { SSEDecoder, @@ -8,7 +8,8 @@ import { LineDecoder, type ServerSentEvent, type Bytes, -} from "./streaming"; +} from './streaming'; +import { APIResource } from './resource'; export type QWenAIOptions = { baseURL?: string; @@ -24,8 +25,8 @@ export class QWenAI extends APIClient { constructor(options?: QWenAIOptions) { const { - apiKey = process.env.QWEN_API_KEY || "", - baseURL = "https://dashscope.aliyuncs.com/api/v1", + apiKey = process.env.QWEN_API_KEY || '', + baseURL = 'https://dashscope.aliyuncs.com/api/v1', timeout = 30000, fetch = globalThis.fetch, httpAgent = undefined, @@ -56,14 +57,6 @@ export class QWenAI extends APIClient { } } -export class APIResource { - protected _client: APIClient; - - constructor(client: APIClient) { - this._client = client; - } -} - export class Chat extends APIResource { completions = new Completions(this._client); } @@ -93,13 +86,13 @@ export class Completions extends APIResource { const headers = { ...options?.headers, // Note: 如果是 stream 的话,需要设置 Accept 为 text/event-stream - Accept: stream ? "text/event-stream" : "application/json", + Accept: stream ? 
'text/event-stream' : 'application/json', }; const body = this.buildCreateParams(rest); const response: Response = await this._client.post( - "/services/aigc/text-generation/generation", + '/services/aigc/text-generation/generation', { ...options, // @ts-expect-error 类型冲突? @@ -115,7 +108,7 @@ export class Completions extends APIResource { if (stream) { const controller = new AbortController(); - options?.signal?.addEventListener("abort", () => { + options?.signal?.addEventListener('abort', () => { controller.abort(); }); @@ -138,12 +131,16 @@ export class Completions extends APIResource { }, parameters: { ...rest, - result_format: "text", // 强制使用 text 格式 - incremental_output: true, + result_format: 'text', // 强制使用 text 格式 repetition_penalty: presence_penalty, }, }; + // 非 stream 不能启用这个 + if (params.stream) { + data.parameters.incremental_output = true; + } + return data; } @@ -195,7 +192,7 @@ export class Completions extends APIResource { > { if (consumed) { throw new Error( - "Cannot iterate over a consumed stream, use `.tee()` to split the stream." + 'Cannot iterate over a consumed stream, use `.tee()` to split the stream.' 
); } consumed = true; @@ -204,12 +201,12 @@ export class Completions extends APIResource { for await (const sse of iterMessages()) { if (done) continue; - if (sse.data.startsWith("[DONE]")) { + if (sse.data.startsWith('[DONE]')) { done = true; continue; } - if (sse.event === "result") { + if (sse.event === 'result') { let data; try { @@ -227,15 +224,15 @@ export class Completions extends APIResource { const choice: OpenAI.ChatCompletionChunk.Choice = { index: 0, delta: { - role: "assistant", - content: data.output.text || "", + role: 'assistant', + content: data.output.text || '', }, finish_reason: null, }; const finish_reason = data.output.finish_reason; - if (finish_reason !== "null") { + if (finish_reason !== 'null') { choice.finish_reason = finish_reason; } @@ -243,7 +240,7 @@ export class Completions extends APIResource { id: data.request_id, model, choices: [choice], - object: "chat.completion.chunk", + object: 'chat.completion.chunk', created: Date.now() / 1000, }; } @@ -251,7 +248,7 @@ export class Completions extends APIResource { done = true; } catch (e) { // If the user calls `stream.controller.abort()`, we should exit without throwing. - if (e instanceof Error && e.name === "AbortError") return; + if (e instanceof Error && e.name === 'AbortError') return; throw e; } finally { // If the user `break`s, abort the ongoing request. 
@@ -266,7 +263,7 @@ export class Completions extends APIResource { model: string, resp: QWenAI.APIResponse ): OpenAI.ChatCompletion { - if ("code" in resp) { + if ('code' in resp) { throw new APIError(undefined, resp, undefined, undefined); } @@ -275,7 +272,7 @@ export class Completions extends APIResource { const choice: OpenAI.ChatCompletion.Choice = { index: 0, message: { - role: "assistant", + role: 'assistant', content: output.text, }, finish_reason: output.finish_reason, @@ -286,7 +283,7 @@ export class Completions extends APIResource { model: model, choices: [choice], created: Date.now() / 1000, - object: "chat.completion", + object: 'chat.completion', usage: { completion_tokens: usage.output_tokens, prompt_tokens: usage.input_tokens, @@ -305,7 +302,11 @@ type OverrideOpenAIChatCompletionCreateParams = { // eslint-disable-next-line @typescript-eslint/no-namespace export namespace QWenAI { - export type ChatModel = 'qwen-turbo' | 'qwen-plus' | 'qwen-max'; + export type ChatModel = + | 'qwen-turbo' + | 'qwen-plus' + | 'qwen-max' + | 'qwen-max-longcontext'; /** * - text 旧版本的 text diff --git a/web/llmapi/src/resource.ts b/web/llmapi/src/resource.ts new file mode 100644 index 0000000..31a75a5 --- /dev/null +++ b/web/llmapi/src/resource.ts @@ -0,0 +1,9 @@ +import { APIClient } from "openai/core"; + +export class APIResource { + protected _client: Client; + + constructor(client: Client) { + this._client = client; + } +} \ No newline at end of file diff --git a/web/studio/app/api/completion/qwen/streaming.ts b/web/llmapi/src/streaming.ts similarity index 71% rename from web/studio/app/api/completion/qwen/streaming.ts rename to web/llmapi/src/streaming.ts index 8e4b50b..4b85853 100644 --- a/web/studio/app/api/completion/qwen/streaming.ts +++ b/web/llmapi/src/streaming.ts @@ -1,11 +1,43 @@ -import { OpenAIError } from 'openai' +import { OpenAIError } from 'openai'; -export type Bytes = string | ArrayBuffer | Uint8Array | Buffer | null | undefined +export type Bytes = 
+ | string + | ArrayBuffer + | Uint8Array + | Buffer + | null + | undefined; export type ServerSentEvent = { - event: string | null - data: string - raw: string[] + event: string | null; + data: string; + raw: string[]; +}; + +export async function* iterMessages( + response: Response, + decoder: SSEDecoder, + controller: AbortController +): AsyncGenerator { + if (!response.body) { + controller.abort(); + throw new OpenAIError(`Attempted to iterate over a response with no body`); + } + + const lineDecoder = new LineDecoder(); + + const iter = readableStreamAsyncIterable(response.body); + for await (const chunk of iter) { + for (const line of lineDecoder.decode(chunk)) { + const sse = decoder.decode(line); + if (sse) yield sse; + } + } + + for (const line of lineDecoder.flush()) { + const sse = decoder.decode(line); + if (sse) yield sse; + } } export class SSEDecoder { @@ -47,8 +79,8 @@ export class SSEDecoder { return null; } - // eslint-disable-next-line prefer-const - let [fieldname, , value] = partition(line, ':'); + // eslint-disable-next-line @typescript-eslint/no-unused-vars, prefer-const + let [fieldname, _, value] = partition(line, ':'); if (value.startsWith(' ')) { value = value.substring(1); @@ -73,7 +105,6 @@ export class SSEDecoder { export class LineDecoder { // prettier-ignore static NEWLINE_CHARS = new Set(['\n', '\r', '\x0b', '\x0c', '\x1c', '\x1d', '\x1e', '\x85', '\u2028', '\u2029']); - // eslint-disable-next-line no-control-regex static NEWLINE_REGEXP = /\r\n|[\n\r\x0b\x0c\x1c\x1d\x1e\x85\u2028\u2029]/g; buffer: string[]; @@ -173,12 +204,16 @@ export class LineDecoder { } function partition(str: string, delimiter: string): [string, string, string] { - const index = str.indexOf(delimiter) + const index = str.indexOf(delimiter); if (index !== -1) { - return [str.substring(0, index), delimiter, str.substring(index + delimiter.length)] + return [ + str.substring(0, index), + delimiter, + str.substring(index + delimiter.length), + ]; } - return [str, 
'', ''] + return [str, '', '']; } /** @@ -187,29 +222,31 @@ function partition(str: string, delimiter: string): [string, string, string] { * * This polyfill was pulled from https://github.com/MattiasBuelens/web-streams-polyfill/pull/122#issuecomment-1627354490 */ -export function readableStreamAsyncIterable(stream: any): AsyncIterableIterator { - if (stream[Symbol.asyncIterator]) return stream +export function readableStreamAsyncIterable( + stream: any +): AsyncIterableIterator { + if (stream[Symbol.asyncIterator]) return stream; - const reader = stream.getReader() + const reader = stream.getReader(); return { async next() { try { - const result = await reader.read() - if (result?.done) reader.releaseLock() // release lock when stream becomes closed - return result + const result = await reader.read(); + if (result?.done) reader.releaseLock(); // release lock when stream becomes closed + return result; } catch (e) { - reader.releaseLock() // release lock when stream becomes errored - throw e + reader.releaseLock(); // release lock when stream becomes errored + throw e; } }, async return() { - const cancelPromise = reader.cancel() - reader.releaseLock() - await cancelPromise - return { done: true, value: undefined } + const cancelPromise = reader.cancel(); + reader.releaseLock(); + await cancelPromise; + return { done: true, value: undefined }; }, [Symbol.asyncIterator]() { - return this + return this; }, - } + }; } diff --git a/web/llmapi/src/util.ts b/web/llmapi/src/util.ts new file mode 100644 index 0000000..4da9ae3 --- /dev/null +++ b/web/llmapi/src/util.ts @@ -0,0 +1,3 @@ +export function ensureArray(value: T | T[]): T[] { + return Array.isArray(value) ? 
value : [value]; +} diff --git a/web/llmapi/src/vyro.ts b/web/llmapi/src/vyro.ts new file mode 100644 index 0000000..929f520 --- /dev/null +++ b/web/llmapi/src/vyro.ts @@ -0,0 +1,698 @@ +import type { Agent } from 'node:http'; +import { ReadableStream } from 'node:stream/web'; + +import { toFile } from 'openai/uploads'; +import { + APIClient, + type DefaultQuery, + type Fetch, + type Headers, + type RequestOptions, + type Uploadable, +} from 'openai/core'; + +import { APIResource } from './resource'; + +export interface VYroAIOptions { + baseURL?: string; + apiKey?: string; + timeout?: number | undefined; + httpAgent?: Agent; + apiType?: (string & NonNullable) | 'api'; + fetch?: Fetch | undefined; + defaultHeaders?: Headers; + defaultQuery?: DefaultQuery; +} + +export class VYroAI extends APIClient { + public apiType: (string & NonNullable) | 'api'; + + protected apiKey: string; + private _options: VYroAIOptions; + + constructor(options: VYroAIOptions = {}) { + const { + apiKey = process.env.VYRO_API_KEY || '', + apiType = process.env.VYRO_API_TYPE || 'api', + baseURL = 'https://api.vyro.ai/v1', + timeout = 30000, + fetch = globalThis.fetch, + httpAgent = undefined, + ...rest + } = options; + + super({ + baseURL, + timeout, + fetch, + httpAgent, + ...rest, + }); + + this._options = options; + + this.apiKey = apiKey; + this.apiType = apiType; + } + + images = new Images(this); + + protected override authHeaders() { + return { + Authorization: `Bearer ${this.apiKey}`, + }; + } + + protected override defaultHeaders(): Headers { + return { + ...this.authHeaders(), + ...this._options.defaultHeaders, + }; + } + + protected override defaultQuery(): DefaultQuery | undefined { + return this._options.defaultQuery; + } +} + +export class Images extends APIResource { + protected models: Record = { + 'imagine-v5': 33, + 'anime-v5': 34, + 'imagine-v4.1': 32, + 'imagine-v4': 31, + 'imagine-v3': 30, + 'imagine-v1': 28, + realistic: 29, + anime: 21, + portrait: 26, + 'sdxl-1.0': 
122, + }; + + /** + * Creates a variation of a given image. + */ + async createVariation( + params: VYroAI.ImageCreateVariationParams, + options?: RequestOptions + ): Promise { + const client = this._client; + + const formData = new FormData(); + + const { model, style = this.models[model ?? 'realistic'] } = params; + + // @ts-expect-error + formData.append('image', await toFile(params.image)); + formData.append('style_id', (style || 29).toString()); + formData.append('prompt', params.prompt); + formData.append('negative_prompt', params.negative_prompt || ''); + formData.append('strength', (params.strength || 0).toString()); + formData.append('steps', (params.steps || 30).toString()); + formData.append('cfg', (params.cfg || 7.5).toString()); + + if (params.seed) { + formData.append('seed', params.seed.toString()); + } + + const response: Response = await client.post( + `/imagine/${client.apiType}/generations/variations`, + { + ...options, + // @ts-expect-error + body: { + body: formData, + [Symbol.toStringTag]: 'MultipartBody', + }, + __binaryResponse: true, + } + ); + + return { + data: [ + { + binary: response.body as unknown as ReadableStream, + }, + ], + created: Math.floor(Date.now() / 1000), + }; + } + + /** + * Experience the magic of Imagine's Image Remix feature, designed to breathe new life into your existing images. + */ + async edit( + params: VYroAI.ImageEditParams, + options?: RequestOptions + ): Promise { + const client = this._client; + + const formData = new FormData(); + + const { model, style = this.models[model ?? 
'realistic'] } = params; + + // @ts-expect-error + formData.append('image', await toFile(params.image)); + formData.append('style_id', (style || 29).toString()); + formData.append('prompt', params.prompt); + formData.append('negative_prompt', params.negative_prompt || ''); + formData.append('strength', (params.strength || 0).toString()); + formData.append('control', params.control || 'openpose'); + formData.append('steps', (params.steps || 30).toString()); + formData.append('cfg', (params.cfg || 7.5).toString()); + + if (params.seed) { + formData.append('seed', params.seed.toString()); + } + + const response: Response = await client.post( + `/imagine/${client.apiType}/edits/remix`, + { + ...options, + // @ts-expect-error + body: { + body: formData, + [Symbol.toStringTag]: 'MultipartBody', + }, + __binaryResponse: true, + } + ); + + return { + data: [ + { + binary: response.body as unknown as ReadableStream, + }, + ], + created: Math.floor(Date.now() / 1000), + }; + } + + /** + * Creates an image given a prompt. + */ + async generate( + params: VYroAI.ImageGenerateParams, + options?: RequestOptions + ): Promise { + const client = this._client; + + const formData = new FormData(); + + const { model, style = this.models[model ?? 'imagine-v4'] } = params; + + formData.append('style_id', (style || 30).toString()); + formData.append('prompt', params.prompt); + formData.append('negative_prompt', params.negative_prompt || ''); + formData.append('aspect_ratio', params.aspect_ratio || '1:1'); + formData.append('steps', (params.steps || 30).toString()); + formData.append('cfg', (params.cfg || 7.5).toString()); + formData.append('high_res_results', params.quality === 'hd' ? 
'1' : '0'); + + if (params.seed) { + formData.append('seed', params.seed.toString()); + } + + const response: Response = await client.post( + `/imagine/${client.apiType}/generations`, + { + ...options, + // @ts-expect-error + body: { + body: formData, + [Symbol.toStringTag]: 'MultipartBody', + }, + __binaryResponse: true, + } + ); + + return { + created: Math.floor(Date.now() / 1000), + data: [ + { + binary: response.body as unknown as ReadableStream, + }, + ], + }; + } + + /** + * The image upscale feature provides a better image to the user by increasing its resolution. + */ + async upscale( + params: VYroAI.ImageUpscaleParams, + options?: RequestOptions + ): Promise { + const client = this._client; + + const formData = new FormData(); + + // @ts-expect-error + formData.append('image', await toFile(params.image)); + + const response: Response = await client.post( + `/imagine/${client.apiType}/upscale`, + { + ...options, + // @ts-expect-error + body: { + body: formData, + [Symbol.toStringTag]: 'MultipartBody', + }, + __binaryResponse: true, + } + ); + + return { + created: Math.floor(Date.now() / 1000), + data: [ + { + binary: response.body as unknown as ReadableStream, + }, + ], + }; + } + + /** + * Inpaint is an advanced feature of the Text-to-Image Stable Diffusion pipeline. + * It allows users to remove unwanted objects or elements from an image by intelligently filling in the missing areas. 
+ */
+ async restoration(
+ params: VYroAI.ImageRestorationParams,
+ options?: RequestOptions
+ ): Promise {
+ const client = this._client;
+
+ const formData = new FormData();
+
+ // @ts-expect-error
+ formData.append('image', await toFile(params.image));
+ // @ts-expect-error
+ formData.append('mask', await toFile(params.mask));
+ formData.append('style_id', '1');
+ formData.append('prompt', params.prompt);
+ formData.append('neg_prompt', params.negative_prompt || '');
+ formData.append('inpaint_strength', (params.strength || 0).toString());
+ formData.append('cfg', (params.cfg || 7.5).toString());
+
+ const response: Response = await client.post(
+ `/imagine/${client.apiType}/generations/variations`,
+ {
+ ...options,
+ // @ts-expect-error
+ body: {
+ body: formData,
+ [Symbol.toStringTag]: 'MultipartBody',
+ },
+ __binaryResponse: true,
+ }
+ );
+
+ return {
+ data: [
+ {
+ binary: response.body as unknown as ReadableStream,
+ },
+ ],
+ created: Math.floor(Date.now() / 1000),
+ };
+ }
+}
+
+// eslint-disable-next-line @typescript-eslint/no-namespace
+export namespace VYroAI {
+ export type ImageModel =
+ | 'imagine-v5'
+ | 'anime-v5'
+ | 'imagine-v4.1'
+ | 'imagine-v4'
+ | 'imagine-v3'
+ | 'imagine-v1'
+ | 'realistic'
+ | 'anime'
+ | 'portrait'
+ | 'sdxl-1.0';
+
+ export interface ImageRestorationParams {
+ /**
+ * The image to use as the basis for the variation(s). Must be a valid PNG file,
+ * less than 4MB, and square.
+ */
+ image: Uploadable;
+
+ /**
+ * The mask indicating the areas to be inpainted.
+ */
+ mask: Uploadable;
+
+ /**
+ * The text guides the image generation.
+ */
+ prompt: string;
+
+ /**
+ * The model to use for image generation.
+ */
+ model?: 'vyro-inpaint' | null;
+
+ /**
+ * The negative_prompt parameter empowers you to provide additional
+ * guidance to the AI by specifying what you don't want in the image.
+ * It helps refine the creative direction, ensuring that the generated
+ * content aligns with your intentions.
+ */ + negative_prompt?: string | null; + + /** + * Specifies the model to be used. Currently supports only 1 for realism. + * + * @defaultValue 1 + */ + style?: 1 | null; + + /** + * Weightage to be given to text + * + * Range: 3 - 15 + * + * @defaultValue 7.5 + */ + cfg?: number | null; + + /** + * Weightage given to initial image. Greater this parameter more the output will be close to starting image and far from prompt. + * + * Range: 0 - 1 + * + * @defaultValue 0.5 + */ + strength?: number | null; + + /** + * 目前仅支持 binary 格式 + */ + response_format?: 'binary' | null; + } + + export interface ImageCreateVariationParams { + /** + * The image to use as the basis for the variation(s). Must be a valid PNG file, + * less than 4MB, and square. + */ + image: Uploadable; + + /** + * The text guides the image generation. + */ + prompt: string; + + /** + * The model to use for image generation. + */ + model?: ImageModel | null; + + /** + * The negative_prompt parameter empowers you to provide additional + * guidance to the AI by specifying what you don't want in the image. + * It helps refine the creative direction, ensuring that the generated + * content aligns with your intentions. + */ + negative_prompt?: string | null; + + /** + * The style_id parameter is like choosing an artistic palette for your image. + * By selecting a style id, you guide the AI in crafting the image with a particular visual aesthetic. + * Style IDs range from 1 to N, each representing a unique artistic style. + * + * @defaultValue 30 + */ + style?: number | null; + + /** + * The steps parameter defines the number of operations or iterations that the + * generator will perform during image creation. It can impact the complexity + * and detail of the generated image. + * + * Range: 30-50 + * + * @defaultValue 30 + */ + steps?: number | null; + + /** + * The cfg parameter acts as a creative control knob. + * You can adjust it to fine-tune the level of artistic innovation in the image. 
+ * Lower values encourage faithful execution of the prompt, + * while higher values introduce more creative and imaginative variations. + * + * Range: 3 - 15 + * + * @defaultValue 7.5 + */ + cfg?: number | null; + + /** + * The seed parameter serves as the initial value for the random number generator. + * By setting a specific seed value, you can ensure that the AI generates the same + * image or outcome each time you use that exact seed. + * + * range: 1-Infinity + */ + seed?: number | null; + + /** + * Influences the impact of the control image on output. + * + * Range: 0 - 1 + * + * @defaultValue 0 + */ + strength?: number | null; + + /** + * 目前仅支持 binary 格式 + */ + response_format?: 'binary' | null; + } + + export interface ImageEditParams { + /** + * The image to use as the basis for the variation(s). Must be a valid PNG file, + * less than 4MB, and square. + */ + image: Uploadable; + + /** + * The text guides the image generation. + */ + prompt: string; + + /** + * The model to use for image generation. + */ + model?: ImageModel | null; + + /** + * The negative_prompt parameter empowers you to provide additional + * guidance to the AI by specifying what you don't want in the image. + * It helps refine the creative direction, ensuring that the generated + * content aligns with your intentions. + */ + negative_prompt?: string | null; + + /** + * The style_id parameter is like choosing an artistic palette for your image. + * By selecting a style id, you guide the AI in crafting the image with a particular visual aesthetic. + * Style IDs range from 1 to N, each representing a unique artistic style. + * + * @defaultValue 29 + */ + style?: number | null; + + /** + * The steps parameter defines the number of operations or iterations that the + * generator will perform during image creation. It can impact the complexity + * and detail of the generated image. 
+ * + * Range: 30-50 + * + * @defaultValue 30 + */ + steps?: number | null; + + /** + * The cfg parameter acts as a creative control knob. + * You can adjust it to fine-tune the level of artistic innovation in the image. + * Lower values encourage faithful execution of the prompt, + * while higher values introduce more creative and imaginative variations. + * + * Range: 3 - 15 + * + * @defaultValue 7.5 + */ + cfg?: number | null; + + /** + * The seed parameter serves as the initial value for the random number generator. + * By setting a specific seed value, you can ensure that the AI generates the same + * image or outcome each time you use that exact seed. + * + * range: 1-Infinity + */ + seed?: number | null; + + /** + * Influences the impact of the control image on output. + * + * Range: 0 - 1 + * + * @defaultValue 0 + */ + strength?: number | null; + + /** + * The method/control used to guide image generation. + * + * @defaultValue openpose + */ + control?: 'openpose' | 'scribble' | 'canny' | 'lineart' | 'depth' | null; + + /** + * 目前仅支持 binary 格式 + */ + response_format?: 'binary' | null; + } + + export interface ImageGenerateParams { + /** + * A prompt is the text input that guides the AI in generating visual content. + * It defines the textual description or concept for the image you wish to generate. + * Think of it as the creative vision you want the AI to bring to life. + * Crafting clear and creative prompts is crucial for achieving the desired results with Imagine's API. + * For example, A serene forest with a river under the moonlight, can be a prompt. + */ + prompt: string; + + /** + * The model to use for image generation. + */ + model?: ImageModel | null; + + /** + * The negative_prompt parameter empowers you to provide additional + * guidance to the AI by specifying what you don't want in the image. + * It helps refine the creative direction, ensuring that the generated + * content aligns with your intentions. 
+ */ + negative_prompt?: string | null; + + /** + * The aspect_ratio parameter allows you to specify the proportions and dimensions of the generated image. + * You can set it to different ratios like 1:1 for square images, 16:9 for widescreen, or 3:4 for vertical formats, + * shaping the visual composition to your liking. + * + * @defaultValue 1:1 + */ + aspect_ratio?: '1:1' | '3:2' | '4:3' | '3:4' | '16:9' | '9:16' | null; + + /** + * The quality parameter is a flag that, when set to hd, + * requests high-resolution results from the AI. + * + * @defaultValue standard + */ + quality?: 'standard' | 'hd'; + + /** + * The style_id parameter is like choosing an artistic palette for your image. + * By selecting a style id, you guide the AI in crafting the image with a particular visual aesthetic. + * Style IDs range from 1 to N, each representing a unique artistic style. + * + * @defaultValue 30 + */ + style?: number | null; + + /** + * The steps parameter defines the number of operations or iterations that the + * generator will perform during image creation. It can impact the complexity + * and detail of the generated image. + * + * Range: 30-50 + * + * @defaultValue 30 + */ + steps?: number | null; + + /** + * The cfg parameter acts as a creative control knob. + * You can adjust it to fine-tune the level of artistic innovation in the image. + * Lower values encourage faithful execution of the prompt, + * while higher values introduce more creative and imaginative variations. + * + * Range: 3 - 15 + * + * @defaultValue 7.5 + */ + cfg?: number | null; + + /** + * The seed parameter serves as the initial value for the random number generator. + * By setting a specific seed value, you can ensure that the AI generates the same + * image or outcome each time you use that exact seed. 
+ * + * range: 1-Infinity + */ + seed?: number | null; + + /** + * 目前仅支持 binary 格式 + */ + response_format?: 'binary' | null; + } + + export interface ImageUpscaleParams { + /** + * The image to use as the basis for the variation(s). Must be a valid PNG file, + * less than 4MB, and square. + */ + image: Uploadable; + } + + export interface Image { + /** + * The binary of the generated image. + */ + binary?: ReadableStream; + + /** + * The base64-encoded JSON of the generated image, if `response_format` is + * `b64_json`. + */ + b64_json?: string; + + /** + * The prompt that was used to generate the image, if there was any revision to the + * prompt. + */ + revised_prompt?: string; + + /** + * The URL of the generated image, if `response_format` is `url` (default). + */ + url?: string; + } + + export interface ImagesResponse { + /** + * When the request was made. + */ + created: number; + + /** + * The generated images. + */ + data: Image[]; + } +} + +export default VYroAI; diff --git a/web/llmapi/tsconfig.json b/web/llmapi/tsconfig.json new file mode 100644 index 0000000..cd9a232 --- /dev/null +++ b/web/llmapi/tsconfig.json @@ -0,0 +1,30 @@ +{ + "compilerOptions": { + "target": "ES2020", + "useDefineForClassFields": true, + "lib": [ + "ES2020" + ], + "module": "ESNext", + "skipLibCheck": true, + /* Bundler mode */ + "moduleResolution": "bundler", + "allowImportingTsExtensions": true, + "resolveJsonModule": true, + "isolatedModules": true, + "noEmit": true, + /* Linting */ + // "strict": true, + "noUnusedLocals": true, + "noUnusedParameters": true, + "noFallthroughCasesInSwitch": true + }, + "include": [ + "src" + ], + "references": [ + { + "path": "./tsconfig.node.json" + } + ] +} \ No newline at end of file diff --git a/web/llmapi/tsconfig.node.json b/web/llmapi/tsconfig.node.json new file mode 100644 index 0000000..a858353 --- /dev/null +++ b/web/llmapi/tsconfig.node.json @@ -0,0 +1,10 @@ +{ + "compilerOptions": { + "composite": true, + "skipLibCheck": true, + 
"module": "ESNext", + "moduleResolution": "bundler", + "allowSyntheticDefaultImports": true + }, + "include": ["vite.config.mts"] +} diff --git a/web/llmapi/vite.config.mts b/web/llmapi/vite.config.mts new file mode 100644 index 0000000..68f94ec --- /dev/null +++ b/web/llmapi/vite.config.mts @@ -0,0 +1,32 @@ +import { defineConfig } from 'vite'; +import checker from 'vite-plugin-checker'; +import dts from 'vite-plugin-dts'; +import { externalizeDeps } from 'vite-plugin-externalize-deps'; + +// https://vitejs.dev/config/ +export default defineConfig({ + plugins: [ + checker({ + typescript: true, + }), + externalizeDeps(), + dts({ + outDir: './dist-types', + }), + ], + build: { + copyPublicDir: false, + lib: { + entry: 'src/index.ts', + formats: ['es'], + }, + rollupOptions: { + output: { + dir: 'dist', + exports: 'named', + entryFileNames: '[name].mjs', + chunkFileNames: '[name].mjs', + }, + }, + }, +}); diff --git a/web/studio/app/api/completion/minimax/route.ts b/web/studio/app/api/completion/minimax/route.ts new file mode 100644 index 0000000..b11b02b --- /dev/null +++ b/web/studio/app/api/completion/minimax/route.ts @@ -0,0 +1,33 @@ +import { OpenAIStream, StreamingTextResponse } from 'ai'; + +import { MinimaxAI } from '@studio-b3/llmapi'; + +// See https://api.minimax.chat/user-center/basic-information/interface-key +const api = new MinimaxAI({ + orgId: process.env.MINIMAX_API_ORG, + apiKey: process.env.MINIMAX_API_KEY, +}); + +// export const runtime = 'edge'; + +export async function POST(req: Request) { + const { prompt } = await req.json(); + + const response = await api.chat.completions.create({ + model: 'abab5.5-chat', + stream: true, + temperature: 0.6, + messages: [ + { + role: 'user', + content: prompt, + }, + ], + }); + + // Convert the response into a friendly text-stream + const stream = OpenAIStream(response); + + // Respond with the stream + return new StreamingTextResponse(stream); +} diff --git a/web/studio/app/api/completion/qwen/route.ts 
b/web/studio/app/api/completion/qwen/route.ts index c521259..5b708af 100644 --- a/web/studio/app/api/completion/qwen/route.ts +++ b/web/studio/app/api/completion/qwen/route.ts @@ -1,6 +1,6 @@ import { OpenAIStream, StreamingTextResponse } from "ai"; -import QWenAI from "./qwen"; +import { QWenAI } from "@studio-b3/llmapi"; const api = new QWenAI({ // https://help.aliyun.com/zh/dashscope/developer-reference/activate-dashscope-and-create-an-api-key @@ -13,7 +13,7 @@ export async function POST(req: Request) { const { prompt } = await req.json(); const response = await api.chat.completions.create({ - model: "qwen-turbo", + model: "qwen-max", stream: true, temperature: 0.6, messages: [ diff --git a/web/studio/app/api/completion/yiyan/route.ts b/web/studio/app/api/completion/yiyan/route.ts index 478b689..94923d9 100644 --- a/web/studio/app/api/completion/yiyan/route.ts +++ b/web/studio/app/api/completion/yiyan/route.ts @@ -1,31 +1,31 @@ -import { OpenAIStream, StreamingTextResponse } from "ai"; +import { OpenAIStream, StreamingTextResponse } from 'ai'; -import { ErnieAPI } from "./erniebot"; +import { ErnieAI } from '@studio-b3/llmapi'; -const api = new ErnieAPI({ - // 访问令牌通过编程对 AI Studio ⽤户进⾏身份验证 - // https://aistudio.baidu.com/index/accessToken - token: process.env.AISTUDIO_ACCESS_TOKEN || '', +const api = new ErnieAI({ + // 访问令牌通过编程对 AI Studio ⽤户进⾏身份验证 + // https://aistudio.baidu.com/index/accessToken + token: process.env.AISTUDIO_ACCESS_TOKEN || '', }); // export const runtime = 'edge'; export async function POST(req: Request) { - const { prompt } = await req.json(); + const { prompt } = await req.json(); - const response = await api.chat.completions.create({ - model: "ernie-bot", - stream: true, - temperature: 0.6, - max_tokens: 1000, - messages: [ - { - role: "user", - content: prompt, - }, - ], - }); + const response = await api.chat.completions.create({ + model: 'ernie-bot', + stream: true, + temperature: 0.6, + max_tokens: 1000, + messages: [ + { + role: 'user', 
+ content: prompt, + }, + ], + }); - const stream = OpenAIStream(response); - return new StreamingTextResponse(stream); + const stream = OpenAIStream(response); + return new StreamingTextResponse(stream); } diff --git a/web/studio/app/api/images/generate/route.ts b/web/studio/app/api/images/generate/route.ts new file mode 100644 index 0000000..3ff0395 --- /dev/null +++ b/web/studio/app/api/images/generate/route.ts @@ -0,0 +1,37 @@ +import { VYroAI } from '@studio-b3/llmapi'; + +// Imagine Art +// see https://platform.imagine.art/dashboard +const api = new VYroAI({ + apiKey: process.env.VYRO_API_KEY, + apiType: process.env.VYRO_API_TYPE, +}); + +// export const runtime = 'edge'; + +export async function GET(req: Request) { + const { searchParams } = new URL(req.url); + + const prompt = searchParams.get('prompt')?.trim(); + + if (!prompt) { + return new Response('prompt must be required', { + status: 400, + }); + } + + const response = await api.images.generate({ + model: 'imagine-v5', + prompt: prompt, + }); + + // TODO 目前只支持单图 + const image = response.data[0].binary!; + + // Respond with the stream + return new Response(image as globalThis.ReadableStream, { + headers: { + 'Content-Type': 'image/png', + }, + }); +} diff --git a/web/studio/package.json b/web/studio/package.json index 400189a..3aface5 100644 --- a/web/studio/package.json +++ b/web/studio/package.json @@ -22,6 +22,7 @@ }, "dependencies": { "@studio-b3/web-core": "workspace:^", + "@studio-b3/llmapi": "workspace:^", "ai": "^2.2.25", "next": "^14", "openai": "^4.20.0",