diff --git a/README.md b/README.md
index 59a2ceb..54b6395 100644
--- a/README.md
+++ b/README.md
@@ -73,6 +73,7 @@ To use Internet search you need a [Tavily API key](https://app.tavily.com/home).
 
 ## DONE
 
+- [x] Cancel commands
 - [x] GPT-4o support
 - [x] Different default engine/model for commands
 - [x] Text attachments (TXT, PDF, DOCX, PPTX, XLSX)
diff --git a/src/automations/commander.ts b/src/automations/commander.ts
index 228dd20..b520d00 100644
--- a/src/automations/commander.ts
+++ b/src/automations/commander.ts
@@ -15,188 +15,219 @@ import { v4 as uuidv4 } from 'uuid'
 
 const textCache: strDict = {}
 
-export const putCachedText = (text: string): string => {
-  const id = uuidv4()
-  textCache[id] = text
-  return id
-}
+export default class Commander {
 
-export const getCachedText = (id: string): string => {
-  const prompt = textCache[id]
-  delete textCache[id]
-  return prompt
-}
+  private llm: LlmEngine
+  private cancelled: boolean
+
+  constructor(llm?: LlmEngine) {
+    this.llm = llm
+    this.cancelled = false
+  }
 
-const promptLlm = (llm: LlmEngine, model: string, prompt: string): Promise<LlmResponse> => {
+  cancelCommand = async (): Promise<void> => {
 
-  // build messages
-  const messages: Message[] = [
-    new Message('user', prompt)
-  ]
+    // close stuff
+    await window.closeWaitingPanel();
+    await window.releaseFocus();
 
-  // now get it
-  return llm.complete(messages, { model: model })
+    // record
+    this.cancelled = true;
 
-}
-
-const finalizeCommand = async (command: Command, text: string, engine: string, model: string): Promise<BrowserWindow|undefined> => {
+  }
 
-  // we need an automator
-  const automator = new Automator();
+  static initCommand = async (): Promise<void> => {
+
+    // hide active windows
+    window.hideActiveWindows();
+    await window.releaseFocus();
+
+    // grab text
+    const automator = new Automator();
+    const text = await automator.getSelectedText();
+    //console.log('Text grabbed', text);
+
+    // // select all
+    // if (text == null || text.trim() === '') {
+    //   await automator.selectAll();
+    //   text = await automator.getSelectedText();
+    // }
+
+    // error
+    if (text == null) {
+      try {
+        new Notification({
+          title: 'Witsy',
+          body: 'An error occurred while trying to grab the text. Please check Privacy & Security settings.'
+        }).show()
+        window.restoreWindows();
+      } catch (error) {
+        console.error('Error showing notification', error);
+      }
+      return;
+    }
 
-  if (command.action === 'chat_window') {
+    // notify if no text
+    if (text.trim() === '') {
+      try {
+        new Notification({
+          title: 'Witsy',
+          body: 'Please highlight the text you want to analyze'
+        }).show()
+        console.log('No text selected');
+        window.restoreWindows();
+      } catch (error) {
+        console.error('Error showing notification', error);
+      }
+      return;
+    }
 
-    return window.openChatWindow({
-      promptId: putCachedText(text),
-      engine: engine || command.engine,
-      model: model || command.model
-    })
-
-  } else if (command.action === 'paste_below') {
+    // log
+    console.debug('Text grabbed:', `${text.slice(0, 50)}…`);
 
-    await automator.moveCaretBelow()
-    await automator.pasteText(text)
+    // go on with a cached text id
+    const textId = Commander.putCachedText(text);
+    await window.openCommandPalette(textId)
 
-  } else if (command.action === 'paste_in_place') {
+  }
 
-    await automator.pasteText(text)
+  execCommand = async (app: App, textId: string, command: Command): Promise<RunCommandResponse> => {
 
-  } else if (command.action === 'clipboard_copy') {
+    //
+    const result: RunCommandResponse = {
+      text: Commander.getCachedText(textId),
+      prompt: null as string | null,
+      response: null as string | null,
+      chatWindow: null as BrowserWindow | null,
+      cancelled: false
+    };
 
-    await automator.copyToClipboard(text)
+    try {
 
-  }
+      // check
+      if (!result.text) {
+        throw new Error('No text to process');
+      }
 
-}
+      // config
+      const config: Configuration = loadSettings(app);
+
+      // extract what we need
+      const template = command.template;
+      const action = command.action;
+      const engine = command.engine || config.commands.engine || config.llm.engine;
+      const model = command.model || config.commands.model || config.getActiveModel();
+      // const temperature = command.temperature;
+
+      // build prompt
+      result.prompt = template.replace('{input}', result.text);
+
+      // new window is different
+      if (action === 'chat_window') {
+
+        result.chatWindow = await this.finishCommand(command, result.prompt, engine, model);
+
+      } else {
+
+        // open waiting panel
+        window.openWaitingPanel();
+
+        // we need an llm
+        if (!this.llm) {
+          this.llm = igniteEngine(engine, config);
+          if (!this.llm) {
+            throw new Error(`Invalid LLM engine: ${engine}`)
+          }
+        }
 
-export const prepareCommand = async (): Promise<void> => {
+        // now prompt llm
+        console.debug(`Prompting with ${result.prompt.slice(0, 50)}…`);
+        const response = await this.promptLlm(model, result.prompt);
+        result.response = response.content;
 
-  // hide active windows
-  window.hideActiveWindows();
-  await window.releaseFocus();
+        // if cancelled
+        if (this.cancelled) {
+          console.debug('Discarding LLM output as command was cancelled');
+          result.cancelled = true;
+          return result;
+        }
 
-  // grab text
-  const automator = new Automator();
-  const text = await automator.getSelectedText();
-  //console.log('Text grabbed', text);
+        // done
+        await window.closeWaitingPanel();
+        await window.releaseFocus();
 
-  // // select all
-  // if (text == null || text.trim() === '') {
-  //   await automator.selectAll();
-  //   text = await automator.getSelectedText();
-  // }
+        // now paste
+        console.debug(`Processing LLM output: ${result.response.slice(0, 50)}…`);
+        await this.finishCommand(command, result.response, engine, model);
 
-  // error
-  if (text == null) {
-    try {
-      new Notification({
-        title: 'Witsy',
-        body: 'An error occurred while trying to grab the text. Please check Privacy & Security settings.'
-      }).show()
-      window.restoreWindows();
-    } catch (error) {
-      console.error('Error showing notification', error);
-    }
-    return;
-  }
+      }
 
-  // notify if no text
-  if (text.trim() === '') {
-    try {
-      new Notification({
-        title: 'Witsy',
-        body: 'Please highlight the text you want to analyze'
-      }).show()
-      console.log('No text selected');
-      window.restoreWindows();
     } catch (error) {
-      console.error('Error showing notification', error);
+      console.error('Error while running command', error);
     }
-    return;
+
+    // done waiting
+    console.log('Destroying waiting panel')
+    await window.closeWaitingPanel(true);
+    window.releaseFocus();
+
+    // done
+    return result;
+
   }
 
-  // log
-  console.debug('Text grabbed:', `${text.slice(0, 50)}…`);
+  private promptLlm = (model: string, prompt: string): Promise<LlmResponse> => {
 
-  // go on with a cached text id
-  const textId = putCachedText(text);
-  await window.openCommandPalette(textId)
+    // build messages
+    const messages: Message[] = [
+      new Message('user', prompt)
+    ]
 
-}
+    // now get it
+    return this.llm.complete(messages, { model: model })
 
-export const runCommand = async (app: App, llm: LlmEngine, textId: string, command: Command): Promise<RunCommandResponse> => {
+  }
 
-  //
-  const result: RunCommandResponse = {
-    text: getCachedText(textId),
-    prompt: null as string | null,
-    response: null as string | null,
-    chatWindow: null as BrowserWindow | null,
-  };
+  private finishCommand = async (command: Command, text: string, engine: string, model: string): Promise<BrowserWindow|undefined> => {
+
+    // we need an automator
+    const automator = new Automator();
 
-  try {
+    if (command.action === 'chat_window') {
 
-    // check
-    if (!result.text) {
-      throw new Error('No text to process');
-    }
+      return window.openChatWindow({
+        promptId: Commander.putCachedText(text),
+        engine: engine || command.engine,
+        model: model || command.model
+      })
+
+    } else if (command.action === 'paste_below') {
 
-    // config
-    const config: Configuration = loadSettings(app);
-
-    // extract what we need
-    const template = command.template;
-    const action = command.action;
-    const engine = command.engine || config.commands.engine || config.llm.engine;
-    const model = command.model || config.commands.model || config.getActiveModel();
-    // const temperature = command.temperature;
-
-    // build prompt
-    result.prompt = template.replace('{input}', result.text);
-
-    // new window is different
-    if (action === 'chat_window') {
-
-      result.chatWindow = await finalizeCommand(command, result.prompt, engine, model);
-
-    } else {
-
-      // open waiting panel
-      window.openWaitingPanel();
-
-      // we need an llm
-      if (!llm) {
-        llm = igniteEngine(engine, config);
-        if (!llm) {
-          throw new Error(`Invalid LLM engine: ${engine}`)
-        }
-      }
+      await automator.moveCaretBelow()
+      await automator.pasteText(text)
+
+    } else if (command.action === 'paste_in_place') {
 
-      // now prompt llm
-      console.debug(`Prompting with ${result.prompt.slice(0, 50)}…`);
-      const response = await promptLlm(llm, model, result.prompt);
-      result.response = response.content;
+      await automator.pasteText(text)
 
-      // done
-      await window.closeWaitingPanel();
-      await window.releaseFocus();
+    } else if (command.action === 'clipboard_copy') {
 
-      // now paste
-      console.debug(`Processing LLM output: ${result.response.slice(0, 50)}…`);
-      await finalizeCommand(command, result.response, engine, model);
+      await automator.copyToClipboard(text)
 
     }
 
-  } catch (error) {
-    console.error('Error while testing', error);
   }
 
-  // done waiting
-  console.log('Destroying waiting panel')
-  await window.closeWaitingPanel(true);
-  window.releaseFocus();
+
+  static getCachedText = (id: string): string => {
+    const prompt = textCache[id]
+    delete textCache[id]
+    return prompt
+  }
 
-  // done
-  return result;
+  static putCachedText = (text: string): string => {
+    const id = uuidv4()
+    textCache[id] = text
+    return id
+  }
 
 }
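The heart of the change is the `cancelled` flag: `cancelCommand` only flips it, so an in-flight `llm.complete()` still runs to completion and `execCommand` discards the late output afterwards. A minimal sketch of that pattern in isolation (the `CancellableTask` name and the timings are illustrative, not part of the patch):

```ts
// Flag-based cancellation, as used by Commander above: the task is not
// aborted; its result is discarded if cancel() was called while in flight.
class CancellableTask<T> {

  private cancelled = false

  cancel(): void {
    this.cancelled = true
  }

  async run(work: () => Promise<T>): Promise<{ value: T|null, cancelled: boolean }> {
    const value = await work()                 // completes even if cancel() was called
    if (this.cancelled) {
      return { value: null, cancelled: true }  // drop the late result
    }
    return { value, cancelled: false }
  }

}

// usage: cancel() wins the race, so the resolved value is discarded
const task = new CancellableTask<string>()
setTimeout(() => task.cancel(), 100)
task.run(() => new Promise<string>((resolve) => setTimeout(() => resolve('llm output'), 500)))
  .then((r) => console.log(r.cancelled ? 'discarded' : r.value))
```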
diff --git a/src/main.ts b/src/main.ts
index e747204..8e5d70b 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -10,6 +10,7 @@ import log from 'electron-log/main';
 import { wait } from './main/utils';
 import AutoUpdater from './main/autoupdate';
+import Commander from './automations/commander';
 import * as config from './main/config';
 import * as history from './main/history';
 import * as commands from './main/commands';
@@ -18,10 +19,11 @@ import * as file from './main/file';
 import * as shortcuts from './main/shortcuts';
 import * as window from './main/window';
 import * as markdown from './main/markdown';
-import * as commander from './automations/commander';
 import * as menu from './main/menu';
 import * as text from './main/text';
 
+let commander: Commander = null
+
 // first-thing: single instance
 // on darwin/mas this is done through Info.plist (LSMultipleInstancesProhibited)
 if (process.platform !== 'darwin' && !process.env.TEST) {
@@ -54,7 +56,7 @@ const autoUpdater = new AutoUpdater({
 
 const registerShortcuts = () => {
   shortcuts.registerShortcuts(app, {
     chat: window.openMainWindow,
-    command: commander.prepareCommand,
+    command: Commander.initCommand,
   });
 }
@@ -74,7 +76,7 @@ const buildTrayMenu = () => {
 
   return [
     { label: 'New Chat', accelerator: shortcuts.shortcutAccelerator(configShortcuts?.chat), click: window.openMainWindow },
-    { label: 'Run AI Command', accelerator: shortcuts.shortcutAccelerator(configShortcuts?.command), click: commander.prepareCommand },
+    { label: 'Run AI Command', accelerator: shortcuts.shortcutAccelerator(configShortcuts?.command), click: Commander.initCommand },
     { type: 'separator'},
     { label: 'Settings…', click: window.openSettingsWindow },
     { type: 'separator'},
@@ -291,7 +293,7 @@ ipcMain.on('render-markdown', (event, payload) => {
 });
 
 ipcMain.on('get-command-prompt', (event, payload) => {
-  event.returnValue = commander.getCachedText(payload);
+  event.returnValue = Commander.getCachedText(payload);
 })
 
 ipcMain.on('close-command-palette', async () => {
@@ -300,10 +302,26 @@ ipcMain.on('close-command-palette', async () => {
 });
 
 ipcMain.on('run-command', async (event, payload) => {
+
+  // cancel any running command
+  if (commander !== null) {
+    commander.cancelCommand();
+  }
+
+  // prepare
   const args = JSON.parse(payload);
   await window.closeCommandPalette();
   await window.releaseFocus();
-  const result = await commander.runCommand(app, null, args.textId, args.command);
+
+  // now run
+  commander = new Commander();
+  const result = await commander.execCommand(app, args.textId, args.command);
+  commander = null;
+
+  // cancelled
+  if (result.cancelled) return;
+
+  // show chat window
   window.restoreWindows();
   if (result?.chatWindow) {
     await wait();
@@ -317,6 +335,15 @@ ipcMain.on('run-command', async (event, payload) => {
   }
 });
 
+ipcMain.on('stop-command', async () => {
+
+  if (commander !== null) {
+    commander.cancelCommand();
+    commander = null;
+  }
+
+});
+
 ipcMain.on('run-python-code', async (event, payload) => {
   try {
     const result = await PythonShell.runString(payload);
diff --git a/src/preload.ts b/src/preload.ts
index 7ef7327..2a545ec 100644
--- a/src/preload.ts
+++ b/src/preload.ts
@@ -53,6 +53,7 @@ contextBridge.exposeInMainWorld(
     load: (): Command[] => { return JSON.parse(ipcRenderer.sendSync('load-commands')) },
     save: (data: Command[]) => { return ipcRenderer.send('save-commands', JSON.stringify(data)) },
     run: (command: Command): void => { return ipcRenderer.send('run-command', JSON.stringify(command)) },
+    cancel: (): void => { return ipcRenderer.send('stop-command') },
     closePalette: (): void => { return ipcRenderer.send('close-command-palette') },
     getPrompt: (id: string): string => { return ipcRenderer.sendSync('get-command-prompt', id) },
   },
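In the main process the lifecycle is: `run-command` cancels any still-running `Commander`, creates a fresh one, and clears the slot once `execCommand` resolves; `stop-command` flips the flag of whatever is in flight, a no-op if the command already finished, since the slot is back to `null`. A condensed model of that wiring, runnable outside Electron (all names below are illustrative):

```ts
// One mutable slot holds the in-flight runner; a second request or a
// stop message cancels whatever currently occupies it.
type Runner = { cancelled: boolean, cancel: () => void }

let current: Runner|null = null

const onRunCommand = async (work: (r: Runner) => Promise<void>): Promise<void> => {
  current?.cancel()                            // supersede any running command
  const runner: Runner = { cancelled: false, cancel() { this.cancelled = true } }
  current = runner
  await work(runner)                           // stand-in for execCommand
  current = null                               // slot is free again
}

const onStopCommand = (): void => {
  current?.cancel()                            // flag it; the runner drops its result
  current = null
}
```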
diff --git a/src/screens/Wait.vue b/src/screens/Wait.vue
index 501a289..d263357 100644
--- a/src/screens/Wait.vue
+++ b/src/screens/Wait.vue
@@ -1,6 +1,8 @@
@@ -8,6 +10,10 @@
 import Loader from '../components/Loader.vue'
 
+const onCancel = () => {
+  window.api.commands.cancel()
+}
+
\ No newline at end of file
diff --git a/src/types/automation.d.ts b/src/types/automation.d.ts
index ebcdb7b..49dc172 100644
--- a/src/types/automation.d.ts
+++ b/src/types/automation.d.ts
@@ -18,4 +18,5 @@ export interface RunCommandResponse {
   prompt: string|null
   response: string|null
   chatWindow: BrowserWindow | null
+  cancelled: boolean
 }
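The `Wait.vue` change adds an `onCancel` handler that goes through the preload bridge; a screen wiring a cancel control to it might look like the sketch below (illustrative markup under assumed class names, not the actual template):

```vue
<!-- Sketch only: a waiting screen with a cancel link calling onCancel. -->
<template>
  <div class="wait">
    <Loader />
    <a class="cancel" @click="onCancel">Cancel</a>
  </div>
</template>

<script setup lang="ts">
import Loader from '../components/Loader.vue'

const onCancel = () => {
  window.api.commands.cancel()
}
</script>
```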
diff --git a/tests/unit/commander.test.ts b/tests/unit/commander.test.ts
index b5070ad..23bd0e5 100644
--- a/tests/unit/commander.test.ts
+++ b/tests/unit/commander.test.ts
@@ -1,10 +1,10 @@
-import { vi, beforeEach, expect, test } from 'vitest'
+import { vi, beforeAll, beforeEach, expect, test } from 'vitest'
 import { Command } from '../../src/types/index.d'
 import { store } from '../../src/services/store'
 import defaults from '../../defaults/settings.json'
-import * as commander from '../../src/automations/commander'
 import * as window from '../../src/main/window'
+import Commander from '../../src/automations/commander'
 import Automator from '../../src/automations/automator'
 import LlmMock from '../mocks/llm'
 
@@ -39,7 +39,7 @@ vi.mock('../../src/automations/automator.ts', async () => {
   return { default: Automator }
 })
 
-beforeEach(() => {
+beforeAll(() => {
 
   // init store
   store.config = defaults
@@ -51,11 +51,15 @@
   }
   store.config.getActiveModel = () => 'chat'
 
-  // reset mocks call history
+})
+
+beforeEach(() => {
+
+  // clear mocks
   vi.clearAllMocks()
 
   // store some text
-  cachedTextId = commander.putCachedText('Grabbed text')
+  cachedTextId = Commander.putCachedText('Grabbed text')
 
 })
 
@@ -76,7 +80,7 @@ const buildCommand = (action: 'chat_window' | 'paste_below' | 'paste_in_place' |
 
 test('Prepare command', async () => {
 
-  await commander.prepareCommand()
+  await Commander.initCommand()
 
   expect(window.hideActiveWindows).toHaveBeenCalledOnce()
   expect(window.releaseFocus).toHaveBeenCalledOnce()
@@ -84,14 +88,15 @@ test('Prepare command', async () => {
 
   expect(window.openCommandPalette).toHaveBeenCalledOnce()
   const textId = window.openCommandPalette.mock.calls[0][0]
-  expect(commander.getCachedText(textId)).toBe('Grabbed text')
+  expect(Commander.getCachedText(textId)).toBe('Grabbed text')
 
 })
 
 test('Chat Window command', async () => {
 
+  const commander = new Commander(new LlmMock(store.config))
   const command = buildCommand('chat_window')
-  await commander.runCommand(null, new LlmMock(store.config), cachedTextId, command)
+  await commander.execCommand(null, cachedTextId, command)
 
   expect(window.openChatWindow).toHaveBeenCalledOnce()
   expect(window.closeWaitingPanel).toHaveBeenCalledOnce()
@@ -102,7 +107,7 @@ test('Chat Window command', async () => {
   expect(Automator.prototype.copyToClipboard).not.toHaveBeenCalled()
 
   const args = window.openChatWindow.mock.calls[0][0]
-  const prompt = commander.getCachedText(args.promptId)
+  const prompt = Commander.getCachedText(args.promptId)
   expect(prompt).toBe('Explain this:\n"""Grabbed text"""')
   expect(args.engine).toBe('mock')
   expect(args.model).toBe('chat')
@@ -111,8 +116,9 @@
 
 test('Paste in-place command', async () => {
 
+  const commander = new Commander(new LlmMock(store.config))
   const command = buildCommand('paste_in_place')
-  await commander.runCommand(null, new LlmMock(store.config), cachedTextId, command)
+  await commander.execCommand(null, cachedTextId, command)
 
   expect(window.openChatWindow).not.toHaveBeenCalled()
   expect(Automator.prototype.moveCaretBelow).not.toHaveBeenCalled()
@@ -125,8 +131,9 @@
 
 test('Paste below command', async () => {
 
+  const commander = new Commander(new LlmMock(store.config))
   const command = buildCommand('paste_below')
-  await commander.runCommand(null, new LlmMock(store.config), cachedTextId, command)
+  await commander.execCommand(null, cachedTextId, command)
 
   expect(window.openChatWindow).not.toHaveBeenCalled()
   expect(Automator.prototype.moveCaretBelow).toHaveBeenCalledOnce()
@@ -139,8 +146,9 @@
 
 test('Copy to clipboard command', async () => {
 
+  const commander = new Commander(new LlmMock(store.config))
   const command = buildCommand('clipboard_copy')
-  await commander.runCommand(null, new LlmMock(store.config), cachedTextId, command)
+  await commander.execCommand(null, cachedTextId, command)
 
   expect(window.openChatWindow).not.toHaveBeenCalled()
   expect(Automator.prototype.moveCaretBelow).not.toHaveBeenCalled()
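The suite exercises all four command actions but not cancellation itself; a test along these lines could cover it (a sketch following the suite's conventions — the timing assumes the mocked LLM resolves on a later tick, so the flag is set before the response lands):

```ts
test('Cancel command', async () => {

  // start a command, then cancel before the mocked LLM resolves
  const commander = new Commander(new LlmMock(store.config))
  const command = buildCommand('paste_in_place')
  const run = commander.execCommand(null, cachedTextId, command)
  await commander.cancelCommand()
  const result = await run

  // the late LLM output must be discarded, not pasted
  expect(result.cancelled).toBe(true)
  expect(Automator.prototype.pasteText).not.toHaveBeenCalled()

})
```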