diff --git a/chat-sample/src/extension.ts b/chat-sample/src/extension.ts index ad9d22ebb..da745e586 100644 --- a/chat-sample/src/extension.ts +++ b/chat-sample/src/extension.ts @@ -23,16 +23,21 @@ export function activate(context: vscode.ExtensionContext) { if (request.command == 'teach') { stream.progress('Picking the right topic to teach...'); const topic = getTopic(context.history); - const messages = [ - vscode.LanguageModelChatMessage.User('You are a cat! Your job is to explain computer science concepts in the funny manner of a cat. Always start your response by stating what concept you are explaining. Always include code samples.'), - vscode.LanguageModelChatMessage.User(topic) - ]; - const [model] = await vscode.lm.selectChatModels(MODEL_SELECTOR); - if (model) { - const chatResponse = await model.sendRequest(messages, {}, token); - for await (const fragment of chatResponse.text) { - stream.markdown(fragment); + try { + const [model] = await vscode.lm.selectChatModels(MODEL_SELECTOR); + if (model) { + const messages = [ + vscode.LanguageModelChatMessage.User('You are a cat! Your job is to explain computer science concepts in the funny manner of a cat. Always start your response by stating what concept you are explaining. 
Always include code samples.'), + vscode.LanguageModelChatMessage.User(topic) + ]; + + const chatResponse = await model.sendRequest(messages, {}, token); + for await (const fragment of chatResponse.text) { + stream.markdown(fragment); + } } + } catch(err) { + handleError(err, stream); } stream.button({ @@ -43,36 +48,46 @@ export function activate(context: vscode.ExtensionContext) { return { metadata: { command: 'teach' } }; } else if (request.command == 'play') { stream.progress('Throwing away the computer science books and preparing to play with some Python code...'); - const [model] = await vscode.lm.selectChatModels(MODEL_SELECTOR); - if (model) { - // Here's an example of how to use the prompt-tsx library to build a prompt - const { messages } = await renderPrompt( - PlayPrompt, - { userQuery: request.prompt }, - { modelMaxPromptTokens: model.maxInputTokens }, - new Cl100KBaseTokenizer()); - const chatResponse = await model.sendRequest(messages, {}, token); - for await (const fragment of chatResponse.text) { - stream.markdown(fragment); + try { + const [model] = await vscode.lm.selectChatModels(MODEL_SELECTOR); + if (model) { + // Here's an example of how to use the prompt-tsx library to build a prompt + const { messages } = await renderPrompt( + PlayPrompt, + { userQuery: request.prompt }, + { modelMaxPromptTokens: model.maxInputTokens }, + new Cl100KBaseTokenizer()); + + const chatResponse = await model.sendRequest(messages, {}, token); + for await (const fragment of chatResponse.text) { + stream.markdown(fragment); + } } + } catch(err) { + handleError(err, stream); } return { metadata: { command: 'play' } }; } else { - const messages = [ - vscode.LanguageModelChatMessage.User(`You are a cat! Think carefully and step by step like a cat would. - Your job is to explain computer science concepts in the funny manner of a cat, using cat metaphors. Always start your response by stating what concept you are explaining. 
Always include code samples.`), - vscode.LanguageModelChatMessage.User(request.prompt) - ]; - const [model] = await vscode.lm.selectChatModels(MODEL_SELECTOR); - if (model) { - const chatResponse = await model.sendRequest(messages, {}, token); - for await (const fragment of chatResponse.text) { - // Process the output from the language model - // Replace all python function definitions with cat sounds to make the user stop looking at the code and start playing with the cat - const catFragment = fragment.replaceAll('def', 'meow'); - stream.markdown(catFragment); + try { + const [model] = await vscode.lm.selectChatModels(MODEL_SELECTOR); + if (model) { + const messages = [ + vscode.LanguageModelChatMessage.User(`You are a cat! Think carefully and step by step like a cat would. + Your job is to explain computer science concepts in the funny manner of a cat, using cat metaphors. Always start your response by stating what concept you are explaining. Always include code samples.`), + vscode.LanguageModelChatMessage.User(request.prompt) + ]; + + const chatResponse = await model.sendRequest(messages, {}, token); + for await (const fragment of chatResponse.text) { + // Process the output from the language model + // Replace all python function definitions with cat sounds to make the user stop looking at the code and start playing with the cat + const catFragment = fragment.replaceAll('def', 'meow'); + stream.markdown(catFragment); + } } + } catch(err) { + handleError(err, stream); } return { metadata: { command: '' } }; @@ -100,11 +115,6 @@ export function activate(context: vscode.ExtensionContext) { vscode.commands.registerTextEditorCommand(CAT_NAMES_COMMAND_ID, async (textEditor: vscode.TextEditor) => { // Replace all variables in active editor with cat names and words const text = textEditor.document.getText(); - const messages = [ - vscode.LanguageModelChatMessage.User(`You are a cat! Think carefully and step by step like a cat would. 
- Your job is to replace all variable names in the following code with funny cat variable names. Be creative. IMPORTANT respond just with code. Do not use markdown!`), - vscode.LanguageModelChatMessage.User(text) - ]; let chatResponse: vscode.LanguageModelChatResponse | undefined; try { @@ -114,13 +124,14 @@ export function activate(context: vscode.ExtensionContext) { return; } + const messages = [ + vscode.LanguageModelChatMessage.User(`You are a cat! Think carefully and step by step like a cat would. + Your job is to replace all variable names in the following code with funny cat variable names. Be creative. IMPORTANT respond just with code. Do not use markdown!`), + vscode.LanguageModelChatMessage.User(text) + ]; chatResponse = await model.sendRequest(messages, {}, new vscode.CancellationTokenSource().token); } catch (err) { - // making the chat request might fail because - // - model does not exist - // - user consent not given - // - quote limits exceeded if (err instanceof vscode.LanguageModelError) { console.log(err.message, err.code, err.cause) } @@ -155,6 +166,19 @@ export function activate(context: vscode.ExtensionContext) { ); } +function handleError(err: any, stream: vscode.ChatResponseStream): void { + // making the chat request might fail because + // - model does not exist + // - user consent not given + // - quota limits exceeded + if (err instanceof vscode.LanguageModelError) { + console.log(err.message, err.code, err.cause); + if (err.cause instanceof Error && err.cause.message.includes('off_topic')) { + stream.markdown(vscode.l10n.t('I\'m sorry, I can only explain computer science concepts.')); + } + } +} + // Get a random topic that the cat has not taught in the chat history yet function getTopic(history: ReadonlyArray<vscode.ChatRequestTurn | vscode.ChatResponseTurn>): string { const topics = ['linked list', 'recursion', 'stack', 'queue', 'pointers'];