
Commit b0509d5: cancel commands

nbonamy committed May 14, 2024 (1 parent: 551019d)

Showing 7 changed files with 249 additions and 160 deletions.
README.md (1 addition, 0 deletions)

```diff
@@ -73,6 +73,7 @@ To use Internet search you need a [Tavily API key](https://app.tavily.com/home).

 ## DONE

+- [x] Cancel commands
 - [x] GPT-4o support
 - [x] Different default engine/model for commands
 - [x] Text attachments (TXT, PDF, DOCX, PPTX, XLSX)
```
src/automations/commander.ts (173 additions, 142 deletions)

@@ -15,188 +15,219 @@

The former free functions (prepareCommand, runCommand, promptLlm, finalizeCommand and the text-cache helpers) are reorganized into a Commander class that owns the LLM engine and a cancelled flag, so that a command in flight can be cancelled. The file after the change:

```ts
import { v4 as uuidv4 } from 'uuid'

const textCache: strDict = {}

export default class Commander {

  private llm: LlmEngine
  private cancelled: boolean

  constructor(llm?: LlmEngine) {
    this.llm = llm
    this.cancelled = false
  }

  cancelCommand = async (): Promise<void> => {

    // close stuff
    await window.closeWaitingPanel();
    await window.releaseFocus();

    // record
    this.cancelled = true;

  }

  static initCommand = async (): Promise<void> => {

    // hide active windows
    window.hideActiveWindows();
    await window.releaseFocus();

    // grab text
    const automator = new Automator();
    const text = await automator.getSelectedText();

    // // select all
    // if (text == null || text.trim() === '') {
    //   await automator.selectAll();
    //   text = await automator.getSelectedText();
    // }

    // error
    if (text == null) {
      try {
        new Notification({
          title: 'Witsy',
          body: 'An error occurred while trying to grab the text. Please check Privacy & Security settings.'
        }).show()
        window.restoreWindows();
      } catch (error) {
        console.error('Error showing notification', error);
      }
      return;
    }

    // notify if no text
    if (text.trim() === '') {
      try {
        new Notification({
          title: 'Witsy',
          body: 'Please highlight the text you want to analyze'
        }).show()
        console.log('No text selected');
        window.restoreWindows();
      } catch (error) {
        console.error('Error showing notification', error);
      }
      return;
    }

    // log
    console.debug('Text grabbed:', `${text.slice(0, 50)}…`);

    // go on with a cached text id
    const textId = Commander.putCachedText(text);
    await window.openCommandPalette(textId)

  }

  execCommand = async (app: App, textId: string, command: Command): Promise<RunCommandResponse> => {

    const result: RunCommandResponse = {
      text: Commander.getCachedText(textId),
      prompt: null as string | null,
      response: null as string | null,
      chatWindow: null as BrowserWindow | null,
      cancelled: false
    };

    try {

      // check
      if (!result.text) {
        throw new Error('No text to process');
      }

      // config
      const config: Configuration = loadSettings(app);

      // extract what we need
      const template = command.template;
      const action = command.action;
      const engine = command.engine || config.commands.engine || config.llm.engine;
      const model = command.model || config.commands.model || config.getActiveModel();
      // const temperature = command.temperature;

      // build prompt
      result.prompt = template.replace('{input}', result.text);

      // new window is different
      if (action === 'chat_window') {

        result.chatWindow = await this.finishCommand(command, result.prompt, engine, model);

      } else {

        // open waiting panel
        window.openWaitingPanel();

        // we need an llm
        if (!this.llm) {
          this.llm = igniteEngine(engine, config);
          if (!this.llm) {
            throw new Error(`Invalid LLM engine: ${engine}`)
          }
        }

        // now prompt llm
        console.debug(`Prompting with ${result.prompt.slice(0, 50)}…`);
        const response = await this.promptLlm(model, result.prompt);
        result.response = response.content;

        // if cancelled
        if (this.cancelled) {
          console.debug('Discarding LLM output as command was cancelled');
          result.cancelled = true;
          return result;
        }

        // done
        await window.closeWaitingPanel();
        await window.releaseFocus();

        // now paste
        console.debug(`Processing LLM output: ${result.response.slice(0, 50)}…`);
        await this.finishCommand(command, result.response, engine, model);

      }

    } catch (error) {
      console.error('Error while testing', error);
    }

    // done waiting
    console.log('Destroying waiting panel')
    await window.closeWaitingPanel(true);
    window.releaseFocus();

    // done
    return result;

  }
```
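Note that cancelCommand does not abort the LLM request itself: execCommand checks the cancelled flag only once the completion resolves, then returns with result.cancelled set so the caller can throw the output away. Below is a minimal sketch of how a main-process caller might consume that flag; the IPC channel names and the handler wiring are assumptions for illustration, not something this diff shows:

```ts
// Sketch only: channel names and surrounding wiring are assumptions, not part of this commit.
import { App, ipcMain } from 'electron'
import Commander from './automations/commander'

let commander: Commander | null = null

export const installCommandHandlers = (app: App): void => {

  // run a command: a fresh Commander per run keeps the cancelled flag scoped to that run
  ipcMain.handle('command-run', async (_event, payload: { textId: string, command: string }) => {
    commander = new Commander()
    const result = await commander.execCommand(app, payload.textId, JSON.parse(payload.command))
    // a cancelled run carries no usable response
    return result.cancelled ? null : result
  })

  // cancel whatever command is in flight
  ipcMain.on('command-cancel', async () => {
    await commander?.cancelCommand()
  })

}
```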

The private helpers and the static text-cache accessors complete the class:

```ts
  private promptLlm = (model: string, prompt: string): Promise<LlmResponse> => {

    // build messages
    const messages: Message[] = [
      new Message('user', prompt)
    ]

    // now get it
    return this.llm.complete(messages, { model: model })

  }

  private finishCommand = async (command: Command, text: string, engine: string, model: string): Promise<BrowserWindow|undefined> => {

    // we need an automator
    const automator = new Automator();

    if (command.action === 'chat_window') {

      return window.openChatWindow({
        promptId: Commander.putCachedText(text),
        engine: engine || command.engine,
        model: model || command.model
      })

    } else if (command.action === 'paste_below') {

      await automator.moveCaretBelow()
      await automator.pasteText(text)

    } else if (command.action === 'paste_in_place') {

      await automator.pasteText(text)

    } else if (command.action === 'clipboard_copy') {

      await automator.copyToClipboard(text)

    }

  }

  static getCachedText = (id: string): string => {
    const prompt = textCache[id]
    delete textCache[id]
    return prompt
  }

  static putCachedText = (text: string): string => {
    const id = uuidv4()
    textCache[id] = text
    return id
  }

}
```
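Because cancellation is just a flag check after the completion resolves, the whole flow can be exercised with a stub engine. A sketch under stated assumptions: window and Automator would need to be mocked in a test environment (they drive Electron windows and native automation), and the stub mirrors only the complete() call that Commander actually makes:

```ts
// Sketch: exercising cancellation with a stub engine (assumes window/Automator are mocked).
import { app } from 'electron'

const stubEngine = {
  complete: async (): Promise<LlmResponse> => {
    await new Promise((resolve) => setTimeout(resolve, 100)) // simulated model latency
    return { content: 'LLM output' } as LlmResponse
  }
} as unknown as LlmEngine

const command = { action: 'paste_in_place', template: 'Fix this: {input}' } as Command
const commander = new Commander(stubEngine)
const textId = Commander.putCachedText('some selected text')

const run = commander.execCommand(app, textId, command) // kick off the round trip
await commander.cancelCommand()                         // cancel while the stub is "thinking"

const result = await run
console.assert(result.cancelled === true)               // output was produced but flagged as discarded
console.assert(result.response === 'LLM output')
```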