diff --git a/src/components/Chat/Chat.tsx b/src/components/Chat/Chat.tsx
index 181969335..9d83b8556 100644
--- a/src/components/Chat/Chat.tsx
+++ b/src/components/Chat/Chat.tsx
@@ -92,6 +92,7 @@ import handleTools, {
   useFetchAllWorkflows,
 } from '~/utils/functionCalling/handleFunctionCalling'
 import { useFetchEnabledDocGroups } from '~/hooks/docGroupsQueries'
+import { buildPrompt } from '~/pages/api/chat'
 
 const montserrat_med = Montserrat({
   weight: '500',
@@ -384,8 +385,7 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
     controller: AbortController,
   ) => {
     const imageContent = (message.content as Content[]).filter(
-      (content) =>
-        content.type === 'image_url'
+      (content) => content.type === 'image_url',
     )
     let imgDesc = ''
     if (imageContent.length > 0) {
@@ -587,6 +587,21 @@ export const Chat = memo(({ stopConversationRef, courseMetadata }: Props) => {
         isImage: false,
       }
 
+      // src/pages/api/buildPrompt.ts
+      const buildPromptResponse = await fetch('/api/buildPrompt', {
+        method: 'POST',
+        headers: {
+          'Content-Type': 'application/json',
+        },
+        body: JSON.stringify(chatBody),
+      })
+      chatBody.conversation = await buildPromptResponse.json()
+      updatedConversation = chatBody.conversation
+      // homeDispatch({
+      //   field: 'selectedConversation',
+      //   value: chatBody.conversation,
+      // })
+
       // Call the OpenAI API
       const response = await fetch(endpoint, {
         method: 'POST',
diff --git a/src/components/UIUC-Components/N8nWorkflowsTable.tsx b/src/components/UIUC-Components/N8nWorkflowsTable.tsx
index 24881597a..88c7a5b09 100644
--- a/src/components/UIUC-Components/N8nWorkflowsTable.tsx
+++ b/src/components/UIUC-Components/N8nWorkflowsTable.tsx
@@ -53,7 +53,7 @@ export const N8nWorkflowsTable = ({
     isSuccess: isSuccess,
     isError: isErrorTools,
     refetch: refetchWorkflows,
-  } = useFetchAllWorkflows(course_name, n8nApiKey, 10, 'true', true)
+  } = useFetchAllWorkflows(course_name, n8nApiKey, 20, 'true', true)
 
   const mutate_active_flows = useMutation({
     mutationFn: async ({ id, checked }: { id: string; checked: boolean }) => {
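The Chat.tsx hunk above introduces a two-step request flow: the client first asks the server to build the final prompt, then streams the completion. A minimal sketch of that flow as a standalone helper (not part of this diff; it assumes ChatBody.conversation is assignable and uses '/api/chat' as a stand-in for the component's `endpoint` variable):

import { ChatBody, Conversation } from '~/types/chat'

async function sendWithEngineeredPrompt(chatBody: ChatBody): Promise<Response> {
  // Step 1: have the server build the final prompt and attach it to the
  // conversation (fills latestSystemMessage / finalPromtEngineeredMessage).
  const buildRes = await fetch('/api/buildPrompt', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(chatBody),
  })
  if (!buildRes.ok) throw new Error(`buildPrompt failed: ${buildRes.status}`)
  chatBody.conversation = (await buildRes.json()) as Conversation

  // Step 2: stream the completion using the enriched conversation.
  return fetch('/api/chat', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(chatBody),
  })
}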
diff --git a/src/pages/api/UIUC-api/logConversationToSupabase.ts b/src/pages/api/UIUC-api/logConversationToSupabase.ts
index 41834a3a0..b9ad63668 100644
--- a/src/pages/api/UIUC-api/logConversationToSupabase.ts
+++ b/src/pages/api/UIUC-api/logConversationToSupabase.ts
@@ -1,10 +1,12 @@
 import { supabase } from '@/utils/supabaseClient'
-import { traceable } from 'langsmith/traceable'
-import { Conversation } from '~/types/chat'
-import { buildPrompt } from '../chat'
+import { Content, Conversation } from '~/types/chat'
+import { RunTree } from 'langsmith'
 
 const logConversationToSupabase = async (req: any, res: any) => {
-  const { course_name, conversation } = req.body
+  const { course_name, conversation } = req.body as {
+    course_name: string
+    conversation: Conversation
+  }
 
   const { data, error } = await supabase.from('llm-convo-monitor').upsert(
     [
@@ -23,21 +25,69 @@ const logConversationToSupabase = async (req: any, res: any) => {
     console.log('error form supabase:', error)
   }
 
-  // TODO get userMessage from BuildPrompt
-  // const userMessage = buildPrompt(conversation, openaiKey, course_name, metadata)
+  console.log('👇👇👇👇👇👇👇👇👇👇👇👇👇')
+  console.log(
+    '2nd Latest message object (user)',
+    conversation.messages[conversation.messages.length - 2],
+  )
+  console.log(
+    'Latest message object (assistant)',
+    conversation.messages[conversation.messages.length - 1],
+  )
+  console.log('full convo id', conversation.id)
+  console.log(
+    'User message',
+    (
+      conversation.messages[conversation.messages.length - 2]
+        ?.content[0] as Content
+    ).text,
+  )
+  console.log(
+    'Assistant message',
+    conversation.messages[conversation.messages.length - 1]?.content,
+  )
+  console.log(
+    'Engineered prompt',
+    conversation.messages[conversation.messages.length - 2]!
+      .finalPromtEngineeredMessage,
+  )
+  console.log(
+    'System message',
+    conversation.messages[conversation.messages.length - 2]!
+      .latestSystemMessage,
+  )
+  console.log('👆👆👆👆👆👆👆👆👆👆👆👆👆')
 
-  // console.log('👇👇👇👇👇👇👇👇👇👇👇👇👇')
-  // console.log('full userMessage', userMessage)
-  // console.log('👆👆👆👆👆👆👆👆👆👆👆👆👆')
+  // Log to Langsmith
+  const rt = new RunTree({
+    run_type: 'llm',
+    name: 'Final Response Log',
+    // inputs: { "Messages": conversation.messages },
+    inputs: {
+      'User input': (
+        conversation.messages[conversation.messages.length - 2]
+          ?.content[0] as Content
+      ).text,
+      'System message':
+        conversation.messages[conversation.messages.length - 2]!
+          .latestSystemMessage,
+      'Engineered prompt':
+        conversation.messages[conversation.messages.length - 2]!
+          .finalPromtEngineeredMessage,
+    },
+    outputs: {
+      Assistant:
+        conversation.messages[conversation.messages.length - 1]?.content,
+    },
+    project_name: 'test-custom-logs',
+    metadata: { projectName: course_name, conversation_id: conversation.id }, // "conversation_id" is a SPECIAL KEYWORD. CANNOT BE ALTERED: https://docs.smith.langchain.com/old/monitoring/faq/threads
+    // id: conversation.id, // DON'T USE - breaks the threading support
+  })
 
-  // TODO: Log to langsmith
-  // const chatModel = traceable(
-  //   async (lastUserMessageAsSubmitted) => {
-  //     return
-  //   },
-  //   { run_type: "llm", name: "logConversationSupabase", metadata: { projectName: course_name, contexts: lastContexts }, inputs: { lastUserMessageAsSubmitted }, outputs: { lastAIMessage } }
-  // )
-  // await chatModel(lastUserMessageAsSubmitted)
+  // End and submit the run
+  rt.end()
+  await rt.postRun()
+  console.log('✅✅✅✅✅✅✅✅ AFTER ALL LANGSMITH CALLS')
 
   return res.status(200).json({ success: true })
 }
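The logging above re-indexes `messages[length - 2]` / `messages[length - 1]` at every call site, with `!` assertions and casts that would throw on a one-message conversation. A possible helper to centralize that (hypothetical, not in this PR; it assumes messages end in a [user, assistant] pair):

import { Conversation, Message } from '~/types/chat'

function lastExchange(conversation: Conversation): {
  userMessage?: Message
  assistantMessage?: Message
} {
  const msgs = conversation.messages
  return {
    userMessage: msgs.length >= 2 ? msgs[msgs.length - 2] : undefined,
    assistantMessage: msgs[msgs.length - 1],
  }
}

// Guarded text extraction instead of `(...content[0] as Content).text`:
function firstText(message?: Message): string | undefined {
  if (!message) return undefined
  if (typeof message.content === 'string') return message.content
  return message.content.find((c) => c.type === 'text')?.text
}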
diff --git a/src/pages/api/buildPrompt.ts b/src/pages/api/buildPrompt.ts
new file mode 100644
index 000000000..21c4e18d8
--- /dev/null
+++ b/src/pages/api/buildPrompt.ts
@@ -0,0 +1,48 @@
+// src/pages/api/buildPrompt.ts
+import { CourseMetadata } from '~/types/courseMetadata'
+import { getCourseMetadata } from '~/pages/api/UIUC-api/getCourseMetadata'
+// @ts-expect-error - no types
+import wasm from '../../../node_modules/@dqbd/tiktoken/lite/tiktoken_bg.wasm?module'
+import tiktokenModel from '@dqbd/tiktoken/encoders/cl100k_base.json'
+import { Tiktoken, init } from '@dqbd/tiktoken/lite/init'
+import { OpenAIError, OpenAIStream } from '@/utils/server'
+import {
+  ChatBody,
+  Content,
+  ContextWithMetadata,
+  Conversation,
+  MessageType,
+  OpenAIChatMessage,
+  ToolOutput,
+  UIUCTool,
+} from '@/types/chat'
+import { NextResponse } from 'next/server'
+import { decrypt, isEncrypted } from '~/utils/crypto'
+import { buildPrompt } from './chat'
+
+export const config = {
+  runtime: 'edge',
+}
+
+// A POST request endpoint that just calls buildPrompt and returns the result as a JSON body.
+export default async (req: Request): Promise<NextResponse> => {
+  try {
+    const { conversation, key, course_name, courseMetadata, isImage } =
+      (await req.json()) as ChatBody
+
+    console.log('In build prompt fetch endpoint!!')
+
+    const updatedConversation = await buildPrompt({
+      conversation,
+      rawOpenaiKey: key,
+      projectName: course_name,
+      courseMetadata,
+      isImage,
+    })
+
+    return new NextResponse(JSON.stringify(updatedConversation))
+  } catch (error) {
+    console.error('Error in buildPromptAPI:', error)
+    return new NextResponse(JSON.stringify({ error: (error as Error).message }))
+  }
+}
diff --git a/src/pages/api/chat.ts b/src/pages/api/chat.ts
index 13f6e7dac..6c4d9c4d5 100644
--- a/src/pages/api/chat.ts
+++ b/src/pages/api/chat.ts
@@ -11,6 +11,7 @@ import {
   Content,
   ContextWithMetadata,
   Conversation,
+  Message,
   MessageType,
   OpenAIChatMessage,
   ToolOutput,
@@ -29,41 +30,46 @@ const handler = async (req: Request): Promise<Response> => {
     (await req.json()) as ChatBody
 
   // Call buildPrompt
-  const { systemPrompt, userPrompt, convoHistory, openAIKey } =
-    await buildPrompt({
-      conversation,
-      rawOpenaiKey: key,
-      projectName: course_name,
-      courseMetadata,
-      isImage
-    })
-
-  console.log(
-    'PROMPT TO BE SENT -- ',
-    userPrompt,
-    'system prompt:',
-    systemPrompt,
-  )
+  // const { systemPrompt, userPrompt, convoHistory, openAIKey } =
+  //   await buildPrompt({
+  //     conversation,
+  //     rawOpenaiKey: key,
+  //     projectName: course_name,
+  //     courseMetadata,
+  //     isImage
+  //   })
 
-  const latestMessage: OpenAIChatMessage = {
-    role: 'user',
-    content: [
-      {
-        type: 'text' as MessageType,
-        text: userPrompt,
-      },
-    ],
-  }
-  const messagesToSend = [latestMessage, ...convoHistory]
-
-  console.log('Messages to send: ', messagesToSend)
+  const openAIKey = await parseOpenaiKey(key)
+
+  // console.log(
+  //   'PROMPT TO BE SENT -- ',
+  //   userPrompt,
+  //   'system prompt:',
+  //   systemPrompt,
+  // )
+
+  // const latestMessage: OpenAIChatMessage = {
+  //   role: 'user',
+  //   content: [
+  //     {
+  //       type: 'text' as MessageType,
+  //       text: userPrompt,
+  //     },
+  //   ],
+  // }
+
+  // const messagesToSend = [latestMessage, ...convoHistory] // BUG: REPLACE (not append to) latest user message. RN we have duplicates.
+
+  // console.log('Messages to send: ', messagesToSend)
 
   const apiStream = await OpenAIStream(
     conversation.model,
-    systemPrompt,
+    conversation.messages[conversation.messages.length - 1]!
+      .latestSystemMessage!,
    conversation.temperature,
     openAIKey,
-    messagesToSend,
+    // @ts-ignore -- I think the types are fine.
+    conversation.messages, // old: messagesToSend
     stream,
   )
   if (stream) {
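The BUG comment above (the latest user message is currently duplicated) could be addressed by replacing the last message's content with the engineered prompt rather than appending it. A sketch under that assumption (helper name is hypothetical; it relies on finalPromtEngineeredMessage being populated by buildPrompt, as in this PR):

import { Content, Conversation, Message, MessageType } from '@/types/chat'

function withEngineeredPrompt(conversation: Conversation): Message[] {
  const msgs = [...conversation.messages]
  const last = msgs[msgs.length - 1]
  if (last?.finalPromtEngineeredMessage) {
    const content: Content[] = [
      { type: 'text' as MessageType, text: last.finalPromtEngineeredMessage },
    ]
    // Replace -- don't append -- so the user's query isn't sent twice.
    msgs[msgs.length - 1] = { ...last, content }
  }
  return msgs
}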
@@ -108,16 +114,19 @@ export const buildPrompt = async ({
   rawOpenaiKey,
   projectName,
   courseMetadata,
-  isImage
+  isImage,
 }: {
   conversation: Conversation
   rawOpenaiKey: string
   projectName: string
   courseMetadata: CourseMetadata | undefined
   isImage: boolean
-}): Promise<{ systemPrompt: string; userPrompt: string; convoHistory: OpenAIChatMessage[]; openAIKey: string }> => {
+  // }): Promise<{ systemPrompt: string; userPrompt: string; convoHistory: OpenAIChatMessage[]; openAIKey: string }> => {
+}): Promise<Conversation> => {
   /*
 System prompt -- defined by user. Then we add the citations instructions to it.
+
+isImage -- means we're JUST generating an image description, not a final answer.
 
 Priorities for building prompt w/ limited window:
 1. ✅ most recent user message
@@ -146,7 +155,7 @@ Priorities for building prompt w/ limited window:
   allPromises.push(_getSystemPrompt({ courseMetadata, conversation }))
   // ideally, run context search here -- parallelized. (tricky due to sending status updates homeDispatch)
   const [openaiKey, lastUserMessage, lastToolResult, userDefinedSystemPrompt] =
-    await Promise.all(allPromises) as [string, string, UIUCTool[], string]
+    (await Promise.all(allPromises)) as [string, string, UIUCTool[], string]
 
   // SYSTEM PROMPT
   let systemPrompt = ''
@@ -159,11 +168,6 @@ Priorities for building prompt w/ limited window:
     remainingTokenBudget -= encoding.encode(systemPrompt).length
   }
 
-  // USER PROMPT
-  let userPrompt = ''
-  userPrompt += lastUserMessage
-  remainingTokenBudget -= encoding.encode(lastUserMessage || '').length
-
   // TOOLS
   // if (lastToolResult && remainingTokenBudget > 0) {
   //   let toolMsg = `The user invoked API(s), aka tool(s), and here's the tool output(s). Remember, use this information when relevant in crafting your response. Cite it directly using an inline citation. Tool output: `
@@ -182,8 +186,13 @@ Priorities for building prompt w/ limited window:
   //     userPrompt += toolMsg
   //   }
   // }
+
+  // USER PROMPT
+  let userPrompt = ''
+  remainingTokenBudget -= encoding.encode(lastUserMessage).length
+
   // Tool output + user Query (added to prompt below)
-  const userQuery = isImage? '': _buildUserQuery({ conversation })
+  const userQuery = isImage ? '' : _buildUserQuery({ conversation })
 
   // Keep room in budget for latest 2 convo messages
   const tokensInLastTwoMessages = _getRecentConvoTokens({
@@ -203,7 +212,7 @@ Priorities for building prompt w/ limited window:
     tokenLimit: remainingTokenBudget - tokensInLastTwoMessages, // keep room for convo history
   })
   if (query_topContext) {
-    const queryContextMsg = `\nHere's high quality passages from the user's documents. Use these, and cite them carefully in the format previously described, to construct your answer: ${query_topContext}`
+    const queryContextMsg = `\nHere's high quality passages from the user's documents. Use these, and cite them carefully in the format previously described, to construct your answer:\n${query_topContext}`
     remainingTokenBudget -= encoding.encode(queryContextMsg).length
     userPrompt += queryContextMsg
   }
@@ -215,16 +224,25 @@ Priorities for building prompt w/ limited window:
     tokenLimit: remainingTokenBudget,
   })
 
-  userPrompt = `\nFinally, please respond to the user's query: ${userPrompt} ${userQuery}`
+  userPrompt += `\nFinally, please respond to the user's query: ${userQuery}`
 
   encoding.free() // keep this
 
-  return {
-    systemPrompt: systemPrompt as string,
-    userPrompt,
-    convoHistory,
-    openAIKey: openaiKey as string,
-  }
+  // conversation.messages = convoHistory // Necessary??
+  // latest user message
+  conversation.messages[
+    conversation.messages.length - 1
+  ]!.finalPromtEngineeredMessage = userPrompt
+  conversation.messages[conversation.messages.length - 1]!.latestSystemMessage =
+    systemPrompt
+  return conversation
+
+  // return {
+  //   systemPrompt: systemPrompt as string,
+  //   userPrompt,
+  //   convoHistory,
+  //   openAIKey: openaiKey as string,
+  // }
 }
 
 const _getRecentConvoTokens = ({
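For readers following the budget arithmetic in buildPrompt above, the pattern is "reserve, then fill by priority": subtract each section's token cost from the remaining budget and stop adding sections once the budget is spent. A compact illustration of the same idea (not part of this diff; assumes an initialized @dqbd/tiktoken encoding like the one buildPrompt already holds, and `packSections` is a hypothetical name):

import { Tiktoken } from '@dqbd/tiktoken/lite/init'

// Sections arrive in priority order; everything after the first section
// that no longer fits the remaining token budget is dropped wholesale.
function packSections(
  encoding: Tiktoken,
  sections: string[],
  tokenLimit: number,
): string {
  let remaining = tokenLimit
  const kept: string[] = []
  for (const section of sections) {
    const cost = encoding.encode(section).length
    if (cost > remaining) break
    kept.push(section)
    remaining -= cost
  }
  return kept.join('\n')
}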
@@ -258,9 +276,8 @@ const _buildUserQuery = ({
 }): string => {
   // ! PROMPT STUFFING
   let userQuery: string = ''
-  const latestUserMessage = conversation.messages[
-    conversation.messages.length - 1
-  ]
+  const latestUserMessage =
+    conversation.messages[conversation.messages.length - 1]
   if (latestUserMessage?.content === 'string') {
     userQuery = latestUserMessage.content as string
   } else if (latestUserMessage?.tools) {
@@ -271,9 +288,14 @@
       if (tool.output && tool.output.text) {
         toolOutput += `Tool: ${tool.name}\nOutput: ${tool.output.text}\n`
       } else if (tool.output && tool.output.imageUrls) {
-        toolOutput += `Tool: ${tool.name}\nOutput: Images were generated by this tool call and the generated image(s) is/are provided below`;
+        toolOutput += `Tool: ${tool.name}\nOutput: Images were generated by this tool call and the generated image(s) is/are provided below`
         // Add image urls to message content
-        (latestUserMessage.content as Content[]).push(...tool.output.imageUrls.map((imageUrl) => ({ type: 'image_url' as MessageType, image_url: { url: imageUrl } })));
+        ;(latestUserMessage.content as Content[]).push(
+          ...tool.output.imageUrls.map((imageUrl) => ({
+            type: 'image_url' as MessageType,
+            image_url: { url: imageUrl },
+          })),
+        )
       } else if (tool.output && tool.output.data) {
         toolOutput += `Tool: ${tool.name}\nOutput: ${JSON.stringify(tool.output.data)}\n`
       } else if (tool.error) {
@@ -281,9 +303,9 @@
       }
       toolMsg += toolOutput
     })
-    userQuery += toolMsg += "\n\n"
+    userQuery += toolMsg += '\n\n'
     // userQuery += "User Query: " + ((latestUserMessage?.content as Content[])?.map((c) => c.text || '').join('\n') || '')
-  }
+  }
   // console.log('Built userQuery: ', userQuery)
   return userQuery
 }
@@ -409,6 +431,7 @@ const _getSystemPrompt = async ({
   }
   return systemPrompt
 }
+
 const _getLastToolResult = async ({
   conversation,
 }: {
@@ -419,6 +442,7 @@ const _getLastToolResult = async ({
   ]?.tools as UIUCTool[]
   return toolResults
 }
+
 const _getLastUserMessage = async ({
   conversation,
 }: {
diff --git a/src/pages/api/getContexts.ts b/src/pages/api/getContexts.ts
index e2269af22..cfe477869 100644
--- a/src/pages/api/getContexts.ts
+++ b/src/pages/api/getContexts.ts
@@ -18,6 +18,32 @@ export const fetchContexts = async (
     doc_groups: doc_groups,
   }
 
+  const dummyContexts: ContextWithMetadata[] = [
+    {
+      id: 1,
+      text: 'This is a dummy context',
+      readable_filename: 'dummy_filename_1.pdf',
+      course_name: 'dummy course 1',
+      'course_name ': 'dummy course 1',
+      s3_path: 'dummy_s3_path_1',
+      pagenumber: '1',
+      url: 'dummy_url_1',
+      base_url: 'dummy_base_url_1',
+    },
+    {
+      id: 2,
+      text: 'This is another dummy context',
+      readable_filename: 'dummy_filename_2.pdf',
+      course_name: 'dummy course 2',
+      'course_name ': 'dummy course 2',
+      s3_path: 'dummy_s3_path_2',
+      pagenumber: '2',
+      url: 'dummy_url_2',
+      base_url: 'dummy_base_url_2',
+    },
+  ]
+  return dummyContexts
+
   const url = `https://flask-production-751b.up.railway.app/getTopContexts`
 
   try {
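Note that the unconditional `return dummyContexts` above short-circuits the real retrieval call, leaving everything from `const url = ...` down unreachable while this PR is in draft. If the stub needs to survive review, one option (not in this diff; the flag name is hypothetical) is to gate it behind an environment variable:

import { ContextWithMetadata } from '~/types/chat'

// Returns the stub only when USE_DUMMY_CONTEXTS is set; null means the real
// retrieval path should run.
export function maybeDummyContexts(
  dummyContexts: ContextWithMetadata[],
): ContextWithMetadata[] | null {
  return process.env.USE_DUMMY_CONTEXTS === 'true' ? dummyContexts : null
}

// Usage inside fetchContexts, replacing the unconditional return:
//   const stub = maybeDummyContexts(dummyContexts)
//   if (stub) return stub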
diff --git a/src/types/chat.ts b/src/types/chat.ts
index 6dc55cbf0..e65c4374c 100644
--- a/src/types/chat.ts
+++ b/src/types/chat.ts
@@ -20,6 +20,8 @@ export interface Message {
   content: string | Content[]
   contexts?: ContextWithMetadata[]
   tools?: UIUCTool[]
+  latestSystemMessage?: string
+  finalPromtEngineeredMessage?: string // after all prompt engineering; used to generate the final response
   responseTimeSec?: number
 }
 
@@ -38,20 +40,20 @@ export interface UIUCTool {
   enabled?: boolean
   createdAt?: string
   updatedAt?: string
-  output?: ToolOutput; // Use a unified output type
+  output?: ToolOutput // Use a unified output type
   error?: string
   contexts?: ContextWithMetadata[]
 }
 
 export type ToolOutput = {
-  text?: string; // For plain text outputs
-  imageUrls?: string[]; // For image URLs
-  s3Paths?: string[]; // For S3 paths of uploaded files
-  data?: Record<string, unknown>; // For any other structured data
-};
+  text?: string // For plain text outputs
+  imageUrls?: string[] // For image URLs
+  s3Paths?: string[] // For S3 paths of uploaded files
+  data?: Record<string, unknown> // For any other structured data
+}
 
 // tool_image_url is for images returned by tools
-export type MessageType = 'text' | 'image_url' | 'tool_image_url'
+export type MessageType = 'text' | 'image_url'
 
 export interface Content {
   type: MessageType
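The unified ToolOutput type above accepts one shape per kind of tool result, which the handleToolOutput dispatch in the next file maps onto one-to-one. Illustrative values (example data only):

import { ToolOutput } from '~/types/chat'

const textResult: ToolOutput = { text: 'Paris is the capital of France.' }
const imageResult: ToolOutput = {
  imageUrls: ['https://example.com/generated-1.png'],
}
const fileResult: ToolOutput = { s3Paths: ['tools/output/report.pdf'] }
const structuredResult: ToolOutput = {
  data: { temperature_c: 21, conditions: 'clear' },
}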
diff --git a/src/utils/functionCalling/handleFunctionCalling.ts b/src/utils/functionCalling/handleFunctionCalling.ts
index 57d873732..97347c7e6 100644
--- a/src/utils/functionCalling/handleFunctionCalling.ts
+++ b/src/utils/functionCalling/handleFunctionCalling.ts
@@ -77,7 +77,7 @@ export default async function handleTools(
   const toolResultsPromises = uiucToolsToRun.map(async (tool) => {
     try {
       const toolOutput = await callN8nFunction(tool, 'todo!') // TODO: Get API key
-      handleToolOutput(toolOutput, tool);
+      await handleToolOutput(toolOutput, tool)
     } catch (error: unknown) {
       console.error(
         `Error running tool: ${error instanceof Error ? error.message : error}`,
@@ -108,33 +114,39 @@ export default async function handleTools(
 const handleToolOutput = async (toolOutput: any, tool: UIUCTool) => {
   // Handle case where toolOutput is a simple string
   if (typeof toolOutput === 'string') {
-    tool.output = { text: toolOutput };
-  }
+    tool.output = { text: toolOutput }
+  }
   // Handle case where toolOutput contains image URLs
   else if (toolOutput.imageUrls && Array.isArray(toolOutput.imageUrls)) {
-    tool.output = { imageUrls: toolOutput.imageUrls };
-  }
+    tool.output = { imageUrls: toolOutput.imageUrls }
+  }
   // Handle case where toolOutput is a single Blob object (binary data)
   else if (toolOutput.data instanceof Blob) {
-    const s3Key = await uploadToS3(toolOutput.data, tool.name) as string;
-    tool.output = { s3Paths: [s3Key] };
-  }
+    const s3Key = (await uploadToS3(toolOutput.data, tool.name)) as string
+    tool.output = { s3Paths: [s3Key] }
+  }
   // Handle case where toolOutput is an array of Blob objects
-  else if (Array.isArray(toolOutput) && toolOutput.every((item: any) => item instanceof Blob)) {
+  else if (
+    Array.isArray(toolOutput) &&
+    toolOutput.every((item: any) => item instanceof Blob)
+  ) {
     const s3KeysPromises = toolOutput.map(async (blob: Blob) => {
-      const file = new File([blob], "filename", { type: blob.type, lastModified: Date.now() });
-      return uploadToS3(file, tool.name);
-    });
-    const s3Keys = await Promise.all(s3KeysPromises) as string[];
-    tool.output = { s3Paths: s3Keys };
+      const file = new File([blob], 'filename', {
+        type: blob.type,
+        lastModified: Date.now(),
+      })
+      return uploadToS3(file, tool.name)
+    })
+    const s3Keys = (await Promise.all(s3KeysPromises)) as string[]
+    tool.output = { s3Paths: s3Keys }
   } else if (tool.output && Array.isArray(toolOutput)) {
-    tool.output.data = toolOutput.reduce((acc, cur) => ({ ...acc, ...cur }), {});
+    tool.output.data = toolOutput.reduce((acc, cur) => ({ ...acc, ...cur }), {})
  }
   // Default case: directly assign toolOutput to tool.output
   else {
-    tool.output = toolOutput;
+    tool.output = toolOutput
   }
-};
+}
 
 // TODO: finalize this function calling
 const callN8nFunction = async (tool: UIUCTool, n8n_api_key: string) => {