google wip
nbonamy committed May 15, 2024
1 parent f99258e commit 9b7d511
Showing 12 changed files with 383 additions and 12 deletions.
1 change: 1 addition & 0 deletions README.md
@@ -67,6 +67,7 @@ To use Internet search you need a [Tavily API key](https://app.tavily.com/home).

## TODO

- [ ] Google Gemini API
- [ ] i18n
- [ ] File upload for retrieval (??)
- [ ] Proper database (SQLite3) storage (??)
1 change: 1 addition & 0 deletions assets/google.svg
2 changes: 1 addition & 1 deletion build/build_number.txt
@@ -1 +1 @@
179
181
9 changes: 9 additions & 0 deletions defaults/settings.json
@@ -66,6 +66,15 @@
"chat": ""
}
},
"google": {
"models": {
"chat": [],
"image": []
},
"model": {
"chat": ""
}
},
"groq": {
"models": {
"chat": [],
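For reference, the new `google` entry above mirrors the other engines' settings and implies a per-engine configuration shape along these lines — a minimal TypeScript sketch, assuming the `EngineConfig` type used in `src/services/google.ts` follows the JSON defaults (the `apiKey` field is inferred from `isGoogleReady` further down, not from this file):

```ts
// Hypothetical shape of a per-engine config entry, inferred from defaults/settings.json.
// Field names not visible in this diff (apiKey, meta) are assumptions.
interface ModelDescriptor {
  id: string
  name: string
  meta?: unknown
}

interface EngineConfig {
  apiKey?: string           // checked by isGoogleReady()
  models: {
    chat: ModelDescriptor[] // populated by loadGoogleModels()
    image: ModelDescriptor[]
  }
  model: {
    chat: string            // id of the currently selected chat model
  }
}
```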
9 changes: 9 additions & 0 deletions package-lock.json


3 changes: 2 additions & 1 deletion package.json
@@ -1,7 +1,7 @@
{
"name": "witsy",
"productName": "Witsy",
"version": "1.6.0",
"version": "1.7.0",
"description": "Witsy: desktop AI assistant",
"repository": {
"type": "git",
@@ -65,6 +65,7 @@
"dependencies": {
"@anthropic-ai/sdk": "^0.20.4",
"@el3um4s/run-vbs": "^1.1.2",
"@google/generative-ai": "^0.11.1",
"@iktakahiro/markdown-it-katex": "^4.0.1",
"@mistralai/mistralai": "^0.1.3",
"applescript": "^1.0.0",
6 changes: 6 additions & 0 deletions src/components/EngineLogo.vue
@@ -10,13 +10,15 @@ import logoOpenAI from '../../assets/openai.svg'
import logoOllama from '../../assets/ollama.svg'
import logoAnthropic from '../../assets/anthropic.svg'
import logoMistralAI from '../../assets/mistralai.svg'
import logoGoogle from '../../assets/google.svg'
import logoGroq from '../../assets/groq.svg'
const logos = {
openai: logoOpenAI,
ollama: logoOllama,
anthropic: logoAnthropic,
mistralai: logoMistralAI,
google: logoGoogle,
groq: logoGroq
}
@@ -73,4 +75,8 @@ const logo = computed(() => logos[props.engine])
filter: none;
}
.logo.grayscale.google {
filter: grayscale();
}
</style>
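A hedged usage note for the logo addition: given the `logos` map and the new CSS rule, the component can presumably be rendered for the new engine like this (only the `engine` prop is visible in the diff; the `grayscale` prop name is an assumption inferred from the `.logo.grayscale.google` selector):

```vue
<!-- hypothetical usage; the grayscale prop name is inferred from the CSS class -->
<EngineLogo engine="google" :grayscale="true" />
```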
203 changes: 203 additions & 0 deletions src/services/google.ts
@@ -0,0 +1,203 @@

import { Message } from '../types/index.d'
import { LLmCompletionPayload, LlmChunk, LlmCompletionOpts, LlmResponse, LlmStream, LlmToolCall, LlmEventCallback } from '../types/llm.d'
import { EngineConfig, Configuration } from '../types/config.d'
import LlmEngine from './engine'
import { ChatSession, Content, EnhancedGenerateContentResponse, GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai'

export const isGoogleReady = (engineConfig: EngineConfig): boolean => {
return engineConfig.apiKey?.length > 0
}

export default class extends LlmEngine {

client: GoogleGenerativeAI
currentChat: ChatSession
toolCalls: LlmToolCall[]

constructor(config: Configuration) {
super(config)
this.client = new GoogleGenerativeAI(
config.engines.google.apiKey,
)
}

getName(): string {
return 'google'
}

getVisionModels(): string[] {
return []//['gemini-pro-vision', '*vision*']
}

isVisionModel(model: string): boolean {
return this.getVisionModels().includes(model) || model.includes('vision')
}

getRountingModel(): string | null {
return null
}

async getModels(): Promise<any[]> {

// need an api key
if (!this.client.apiKey) {
return null
}

// do it
return [
{ id: 'models/gemini-1.5-pro-latest', name: 'Gemini 1.5 Pro' },
//{ id: 'gemini-1.5-flash', name: 'Gemini 1.5 Flash' },
{ id: 'models/gemini-pro', name: 'Gemini 1.0 Pro' },
{ id: 'models/gemini-pro-vision', name: 'Gemini Pro Vision' },
]
}


async complete(thread: Message[], opts: LlmCompletionOpts): Promise<LlmResponse> {

// call
const modelName = opts?.model || this.config.engines.google.model.chat
console.log(`[google] prompting model ${modelName}`)
const model = this.getModel(modelName, thread[0].content)
const chat = model.startChat({
history: thread.slice(1, -1).map((message) => this.messageToContent(message))
})

// done
const result = await chat.sendMessage(thread[thread.length-1].content)
return {
type: 'text',
content: result.response.text()
}
}

async stream(thread: Message[], opts: LlmCompletionOpts): Promise<LlmStream> {

// model: switch to vision if needed
const modelName = this.selectModel(thread, opts?.model || this.getChatModel())

// reset
this.toolCalls = []

// save the message thread
const payload = this.buildPayload(thread, modelName)

// call
console.log(`[google] prompting model ${modelName}`)
const model = this.getModel(modelName, payload[0].content)
this.currentChat = model.startChat({
history: payload.slice(1, -1).map((message) => this.messageToContent(message))
})

// done
const result = await this.currentChat.sendMessageStream(payload[payload.length-1].content)
return result.stream

}

// eslint-disable-next-line @typescript-eslint/no-unused-vars
getModel(model: string, instructions: string): GenerativeModel {
return this.client.getGenerativeModel({
model: model,
//systemInstruction: instructions
// tools: [{
// functionDeclarations: this.getAvailableTools().map((tool) => {
// return tool.function
// })
// }],
}, {
apiVersion: 'v1beta'
})
}

messageToContent(message: any): Content {
return {
role: message.role == 'assistant' ? 'model' : message.role,
parts: [ { text: message.content } ]
}
}

// eslint-disable-next-line @typescript-eslint/no-unused-vars
async stop(stream: AsyncGenerator<any>) {
//await stream?.controller?.abort()
}

async streamChunkToLlmChunk(chunk: EnhancedGenerateContentResponse, eventCallback: LlmEventCallback): Promise<LlmChunk|null> {

//console.log('[google] chunk:', chunk)

// tool calls
const toolCalls = chunk.functionCalls()
if (toolCalls?.length) {

// save
this.toolCalls = toolCalls.map((tc) => {
return {
id: tc.name,
message: '',
function: tc.name,
args: JSON.stringify(tc.args),
}
})

// call
for (const toolCall of this.toolCalls) {

// first notify
eventCallback?.call(this, {
type: 'tool',
content: this.getToolPreparationDescription(toolCall.function)
})

// then notify
eventCallback?.call(this, {
type: 'tool',
content: this.getToolRunningDescription(toolCall.function)
})

// now execute
const args = JSON.parse(toolCall.args)
const content = await this.callTool(toolCall.function, args)
console.log(`[google] tool call ${toolCall.function} with ${JSON.stringify(args)} => ${JSON.stringify(content).substring(0, 128)}`)

// send the tool result back (the follow-up response stream is not consumed here yet)
await this.currentChat.sendMessageStream([
{ functionResponse: {
name: toolCall.function,
response: content
}}
])

// clear
eventCallback?.call(this, {
type: 'tool',
content: null,
})

}

// done
return null

}

// text chunk
return {
text: chunk.text(),
done: chunk.candidates[0].finishReason === 'STOP'
}
}

// eslint-disable-next-line @typescript-eslint/no-unused-vars
addImageToPayload(message: Message, payload: LLmCompletionPayload) {
//TODO
}

// eslint-disable-next-line @typescript-eslint/no-unused-vars
async image(prompt: string, opts: LlmCompletionOpts): Promise<LlmResponse> {
return null
}

}
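For context, here is a minimal driver sketch (not part of the commit) showing how the new engine's `stream()` / `streamChunkToLlmChunk()` pair is meant to be consumed. The `store` import path is an assumption; everything else uses only symbols added in this file, and the thread is expected to start with a system message, since `getModel()` receives `thread[0].content` as instructions:

```ts
// Minimal consumption sketch for the new Google engine. Assumptions: the store
// import path, and that Message objects expose `role` and `content` as used above.
import { Message } from '../types/index.d'
import { store } from './store'
import Google from './google'

async function askGemini(thread: Message[]): Promise<string> {
  const google = new Google(store.config)

  // stream() returns the SDK's async iterable of EnhancedGenerateContentResponse
  const stream = await google.stream(thread, { model: 'models/gemini-1.5-pro-latest' })

  let text = ''
  for await (const chunk of stream) {
    // a null result means the chunk carried tool calls that were handled internally
    const llmChunk = await google.streamChunkToLlmChunk(chunk, () => { /* ignore tool events */ })
    if (llmChunk?.text) {
      text += llmChunk.text
    }
  }
  return text
}
```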
38 changes: 37 additions & 1 deletion src/services/llm.ts
@@ -5,10 +5,11 @@ import OpenAI, { isOpenAIReady } from './openai'
import Ollama, { isOllamaReady } from './ollama'
import MistralAI, { isMistrailAIReady } from './mistralai'
import Anthropic, { isAnthropicReady } from './anthropic'
import Google, { isGoogleReady } from './google'
import Groq, { isGroqReady } from './groq'
import LlmEngine from './engine'

export const availableEngines = ['openai', 'ollama', 'anthropic', 'mistralai', 'groq']
export const availableEngines = ['openai', 'ollama', 'anthropic', 'mistralai', 'google', 'groq']
export const textFormats = ['pdf', 'txt', 'docx', 'pptx', 'xlsx']
export const imageFormats = ['jpeg', 'jpg', 'png', 'webp']

@@ -17,6 +18,7 @@ export const isEngineReady = (engine: string) => {
if (engine === 'ollama') return isOllamaReady(store.config.engines.ollama)
if (engine === 'mistralai') return isMistrailAIReady(store.config.engines.mistralai)
if (engine === 'anthropic') return isAnthropicReady(store.config.engines.anthropic)
if (engine === 'google') return isGoogleReady(store.config.engines.google)
if (engine === 'groq') return isGroqReady(store.config.engines.groq)
return false
}
@@ -26,6 +28,7 @@ export const igniteEngine = (engine: string, config: Configuration, fallback = '
if (engine === 'ollama') return new Ollama(config)
if (engine === 'mistralai') return new MistralAI(config)
if (engine === 'anthropic') return new Anthropic(config)
if (engine === 'google') return new Google(config)
if (engine === 'groq') return new Groq(config)
if (isEngineReady(fallback)) return igniteEngine(fallback, config)
return null
@@ -223,6 +226,39 @@ export const loadAnthropicModels = async () => {
return true
}

export const loadGoogleModels = async () => {

let models = []

try {
const google = new Google(store.config)
models = await google.getModels()
} catch (error) {
console.error('Error listing Google models:', error);
}
if (!models) {
store.config.engines.google.models = { chat: [], image: [], }
return false
}

// store
store.config.engines.google.models = {
image: [],
chat: models
.map(model => { return {
id: model.id,
name: model.name,
meta: model
}})
//.sort((a, b) => a.name.localeCompare(b.name))
}

// select valid model
store.config.engines.google.model.chat = getValidModelId('google', 'chat', store.config.engines.google.model.chat)

// done
return true
}

export const loadGroqModels = async () => {

let models = []
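A hedged end-to-end sketch of how the new `llm.ts` entries fit together — readiness check, model refresh, then ignition. Only functions visible in this diff are used; the `store` import path is an assumption:

```ts
// Hypothetical wiring of the new Google entries in llm.ts (store path assumed).
import { store } from './store'
import { isEngineReady, igniteEngine, loadGoogleModels } from './llm'

async function setupGoogle() {
  // isGoogleReady() requires a non-empty API key in store.config.engines.google
  if (!isEngineReady('google')) {
    console.log('Google engine not configured (missing API key)')
    return null
  }
  // refreshes store.config.engines.google.models and re-validates the selected model
  await loadGoogleModels()
  // the third argument names a fallback engine used when the requested one is unavailable
  return igniteEngine('google', store.config, 'openai')
}
```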