diff --git a/next.config.mjs b/next.config.mjs
index 1c8586c95..4130921f2 100644
--- a/next.config.mjs
+++ b/next.config.mjs
@@ -37,6 +37,27 @@ const config = {
   experimental: {
     esmExternals: false, // To make upload thing work with /pages router.
   },
+  async headers() {
+    return [
+      {
+        source: '/api/:path*',
+        headers: [
+          {
+            key: 'Access-Control-Allow-Origin',
+            value: '*',
+          },
+          {
+            key: 'Access-Control-Allow-Methods',
+            value: 'GET,PUT,POST,DELETE,OPTIONS',
+          },
+          {
+            key: 'Access-Control-Allow-Headers',
+            value: 'X-Requested-With, X-HTTP-Method-Override, Content-Type, Accept',
+          },
+        ],
+      },
+    ]
+  },
 }
 
 const withAxiomConfig = withAxiom(config)
diff --git a/src/pages/api/chat.ts b/src/pages/api/chat.ts
index 94b2d8f6c..edd47756c 100644
--- a/src/pages/api/chat.ts
+++ b/src/pages/api/chat.ts
@@ -17,8 +17,10 @@ export const config = {
 
 const handler = async (req: Request): Promise<Response> => {
   try {
+    console.log("Top of /api/chat.ts. req: ", req)
     const { model, messages, key, prompt, temperature, course_name, stream } =
       (await req.json()) as ChatBody
+    console.log("After message parsing: ", model, messages, key, prompt, temperature, course_name, stream)
 
     await init((imports) => WebAssembly.instantiate(wasm, imports))
     const encoding = new Tiktoken(
@@ -27,7 +29,9 @@ const handler = async (req: Request): Promise<Response> => {
       tiktokenModel.pat_str,
     )
 
+    const token_limit = OpenAIModels[model.id as OpenAIModelID].tokenLimit
+    console.log("Model's token limit", token_limit)
     let promptToSend = prompt
     if (!promptToSend) {