Commit

more tests
nbonamy committed May 15, 2024
1 parent c226454 commit 3c67adb
Showing 4 changed files with 177 additions and 56 deletions.
106 changes: 53 additions & 53 deletions src/services/google.ts
@@ -173,60 +173,60 @@ export default class extends LlmEngine {
 
     //console.log('[google] chunk:', chunk)
 
-    // tool calls
-    const toolCalls = chunk.functionCalls()
-    if (toolCalls?.length) {
-
-      // save
-      this.toolCalls = toolCalls.map((tc) => {
-        return {
-          id: tc.name,
-          message: '',
-          function: tc.name,
-          args: JSON.stringify(tc.args),
-        }
-      })
-
-      // call
-      for (const toolCall of this.toolCalls) {
-
-        // first notify
-        eventCallback?.call(this, {
-          type: 'tool',
-          content: this.getToolPreparationDescription(toolCall.function)
-        })
-
-        // then notify
-        eventCallback?.call(this, {
-          type: 'tool',
-          content: this.getToolRunningDescription(toolCall.function)
-        })
-
-        // now execute
-        const args = JSON.parse(toolCall.args)
-        const content = await this.callTool(toolCall.function, args)
-        console.log(`[google] tool call ${toolCall.function} with ${JSON.stringify(args)} => ${JSON.stringify(content).substring(0, 128)}`)
-
-        // send
-        this.currentChat.sendMessageStream([
-          { functionResponse: {
-            name: toolCall.function,
-            response: content
-          }}
-        ])
-
-        // clear
-        eventCallback?.call(this, {
-          type: 'tool',
-          content: null,
-        })
-
-      }
-
-      // done
-      return null
+    // // tool calls
+    // const toolCalls = chunk.functionCalls()
+    // if (toolCalls?.length) {
+
+    //   // save
+    //   this.toolCalls = toolCalls.map((tc) => {
+    //     return {
+    //       id: tc.name,
+    //       message: '',
+    //       function: tc.name,
+    //       args: JSON.stringify(tc.args),
+    //     }
+    //   })
+
+    //   // call
+    //   for (const toolCall of this.toolCalls) {
+
+    //     // first notify
+    //     eventCallback?.call(this, {
+    //       type: 'tool',
+    //       content: this.getToolPreparationDescription(toolCall.function)
+    //     })
+
+    //     // then notify
+    //     eventCallback?.call(this, {
+    //       type: 'tool',
+    //       content: this.getToolRunningDescription(toolCall.function)
+    //     })
+
+    //     // now execute
+    //     const args = JSON.parse(toolCall.args)
+    //     const content = await this.callTool(toolCall.function, args)
+    //     console.log(`[google] tool call ${toolCall.function} with ${JSON.stringify(args)} => ${JSON.stringify(content).substring(0, 128)}`)
+
+    //     // send
+    //     this.currentChat.sendMessageStream([
+    //       { functionResponse: {
+    //         name: toolCall.function,
+    //         response: content
+    //       }}
+    //     ])
+
+    //     // clear
+    //     eventCallback?.call(this, {
+    //       type: 'tool',
+    //       content: null,
+    //     })
+
+    //   }
+
+    //   // done
+    //   return null
 
-    }
+    // }
 
     // text chunk
     return {
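With this hunk the Gemini streaming path stops reacting to function calls and always falls through to the text-chunk branch. For reference, a minimal sketch of how function calls surface when streaming with the @google/generative-ai SDK, using the same calls as the code above (chunk.functionCalls(), sendMessageStream with a functionResponse part); the model name and the runTool helper are illustrative assumptions, not code from this repository:

import { GoogleGenerativeAI } from '@google/generative-ai'

// hypothetical stand-in for an application-level tool dispatcher
async function runTool(name: string, args: object): Promise<object> {
  return { name, args, result: 'stubbed' }
}

const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY ?? '')
const model = genAI.getGenerativeModel({ model: 'gemini-pro' })
const chat = model.startChat()

const result = await chat.sendMessageStream('What is the weather in Paris?')
for await (const chunk of result.stream) {
  // functionCalls() returns undefined on plain text chunks
  const calls = chunk.functionCalls()
  if (calls?.length) {
    for (const call of calls) {
      const response = await runTool(call.name, call.args)
      // feed the tool result back so the model can finish its answer
      await chat.sendMessageStream([
        { functionResponse: { name: call.name, response } },
      ])
    }
  } else {
    process.stdout.write(chunk.text())
  }
}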
26 changes: 25 additions & 1 deletion tests/unit/engine.test.ts
@@ -1,6 +1,6 @@

 import { beforeEach, expect, test } from 'vitest'
-import { isEngineReady, igniteEngine } from '../../src/services/llm'
+import { isEngineReady, igniteEngine, hasVisionModels, isVisionModel } from '../../src/services/llm'
 import { store } from '../../src/services/store'
 import defaults from '../../defaults/settings.json'
 import OpenAI from '../../src/services/openai'
@@ -22,6 +22,8 @@ test('Default Configuration', () => {
   expect(isEngineReady('mistralai')).toBe(false)
   expect(isEngineReady('anthropic')).toBe(false)
   expect(isEngineReady('google')).toBe(false)
+  expect(isEngineReady('groq')).toBe(false)
+  expect(isEngineReady('aws')).toBe(false)
 })
 
 test('OpenAI Configuration', () => {
@@ -66,11 +68,33 @@ test('Google Configuration', () => {
   expect(isEngineReady('google')).toBe(true)
 })
 
+test('Groq Configuration', () => {
+  store.config.engines.groq.models.image = [model]
+  expect(isEngineReady('groq')).toBe(false)
+  store.config.engines.groq.models.chat = [model]
+  expect(isEngineReady('groq')).toBe(false)
+  store.config.engines.groq.apiKey = '123'
+  expect(isEngineReady('groq')).toBe(true)
+})
+
 test('Ignite Engine', async () => {
   expect(await igniteEngine('openai', store.config)).toBeInstanceOf(OpenAI)
   expect(await igniteEngine('ollama', store.config)).toBeInstanceOf(Ollama)
   expect(await igniteEngine('mistralai', store.config)).toBeInstanceOf(MistralAI)
   expect(await igniteEngine('anthropic', store.config)).toBeInstanceOf(Anthropic)
   expect(await igniteEngine('google', store.config)).toBeInstanceOf(Google)
+  expect(await igniteEngine('groq', store.config)).toBeInstanceOf(Groq)
+  expect(await igniteEngine('aws', store.config)).toBeInstanceOf(OpenAI)
+  expect(await igniteEngine('aws', store.config, 'aws')).toBeNull()
 })
 
+test('Has Vision Models', async () => {
+  expect(hasVisionModels('openai')).toBe(true)
+  expect(hasVisionModels('anthropic')).toBe(false)
+})
+
+test('Is Vision Model', async () => {
+  expect(isVisionModel('openai', 'gpt-3.5')).toBe(false)
+  expect(isVisionModel('openai', 'gpt-4-turbo')).toBe(true)
+  expect(isVisionModel('openai', 'gpt-vision')).toBe(true)
+})
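Taken together, these assertions pin down the readiness contract: an engine becomes ready once it has at least one chat model and, for hosted providers, an API key; image models alone are not enough. A minimal sketch consistent with the expectations above; the engine list and config shape are assumptions, not the project's actual implementation:

type EngineConfig = { apiKey?: string, models?: { chat?: string[], image?: string[] } }

const hostedEngines = ['openai', 'mistralai', 'anthropic', 'google', 'groq']

function isEngineReady(engines: Record<string, EngineConfig>, engine: string): boolean {
  const config = engines[engine]
  if (!config) return false                        // unknown engines are never ready
  if (!config.models?.chat?.length) return false   // at least one chat model required
  if (hostedEngines.includes(engine) && !config.apiKey) return false
  return true
}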
4 changes: 2 additions & 2 deletions tests/unit/engine_google.test.ts
@@ -100,12 +100,12 @@ test('Google streamChunkToLlmChunk Text', async () => {
   }
   const llmChunk1 = await google.streamChunkToLlmChunk(streamChunk, null)
   expect(streamChunk.text).toHaveBeenCalled()
-  expect(streamChunk.functionCalls).toHaveBeenCalled()
+  //expect(streamChunk.functionCalls).toHaveBeenCalled()
   expect(llmChunk1).toStrictEqual({ text: 'response', done: false })
   streamChunk.candidates[0].finishReason = 'STOP'
   streamChunk.text = vi.fn(() => '')
   const llmChunk2 = await google.streamChunkToLlmChunk(streamChunk, null)
   expect(streamChunk.text).toHaveBeenCalled()
-  expect(streamChunk.functionCalls).toHaveBeenCalled()
+  //expect(streamChunk.functionCalls).toHaveBeenCalled()
   expect(llmChunk2).toStrictEqual({ text: '', done: true })
 })
97 changes: 97 additions & 0 deletions tests/unit/engine_groq.test.ts
@@ -0,0 +1,97 @@

+import { vi, beforeEach, expect, test } from 'vitest'
+import { store } from '../../src/services/store'
+import defaults from '../../defaults/settings.json'
+import Message from '../../src/models/message'
+import Groq from '../../src/services/groq'
+import { ChatCompletionChunk } from 'groq-sdk/lib/chat_completions_ext'
+import { loadGroqModels } from '../../src/services/llm'
+import { Model } from '../../src/types/config.d'
+
+vi.mock('groq-sdk', async() => {
+  const Groq = vi.fn()
+  Groq.prototype.apiKey = '123'
+  Groq.prototype.listModels = vi.fn(() => {
+    return { data: [
+      { id: 'model2', name: 'model2' },
+      { id: 'model1', name: 'model1' },
+    ] }
+  })
+  Groq.prototype.chat = {
+    completions: {
+      create: vi.fn((opts) => {
+        if (opts.stream) {
+          return {
+            controller: {
+              abort: vi.fn()
+            }
+          }
+        }
+        else {
+          return { choices: [{ message: { content: 'response' } }] }
+        }
+      })
+    }
+  }
+  return { default : Groq }
+})
+
+beforeEach(() => {
+  store.config = defaults
+  store.config.engines.groq.apiKey = '123'
+})
+
+test('Groq Load Models', async () => {
+  expect(await loadGroqModels()).toBe(true)
+  const models = store.config.engines.groq.models.chat
+  expect(models.map((m: Model) => { return { id: m.id, name: m.name }})).toStrictEqual([
+    { id: 'gemma-7b-it-32768', name: 'Gemma 7b' },
+    { id: 'llama2-70b-4096', name: 'LLaMA2 70b' },
+    { id: 'llama3-70b-8192', name: 'LLaMA3 70b' },
+    { id: 'llama3-8b-8192', name: 'LLaMA3 8b' },
+    { id: 'mixtral-8x7b-32768', name: 'Mixtral 8x7b' },
+  ])
+  expect(store.config.engines.groq.model.chat).toStrictEqual(models[0].id)
+})
+
+test('Groq Basic', async () => {
+  const groq = new Groq(store.config)
+  expect(groq.getName()).toBe('groq')
+  expect(groq.isVisionModel('llama2-70b-4096')).toBe(false)
+  expect(groq.isVisionModel('llama3-70b-8192')).toBe(false)
+  expect(groq.getRountingModel()).toBeNull()
+})
+
+test('Groq completion', async () => {
+  const groq = new Groq(store.config)
+  const response = await groq.complete([
+    new Message('system', 'instruction'),
+    new Message('user', 'prompt'),
+  ], null)
+  expect(response).toStrictEqual({
+    type: 'text',
+    content: 'response'
+  })
+})
+
+test('Groq stream', async () => {
+  const groq = new Groq(store.config)
+  const response = await groq.stream([
+    new Message('system', 'instruction'),
+    new Message('user', 'prompt'),
+  ], null)
+  expect(response.controller).toBeDefined()
+  await groq.stop(response)
+})
+
+test('Groq streamChunkToLlmChunk Text', async () => {
+  const groq = new Groq(store.config)
+  const streamChunk: ChatCompletionChunk = {
+    choices: [{ index: 0, delta: { content: 'response' }, finish_reason: null }],
+  }
+  const llmChunk1 = await groq.streamChunkToLlmChunk(streamChunk, null)
+  expect(llmChunk1).toStrictEqual({ text: 'response', done: false })
+  streamChunk.choices[0].finish_reason = 'stop'
+  const llmChunk2 = await groq.streamChunkToLlmChunk(streamChunk, null)
+  expect(llmChunk2).toStrictEqual({ text: '', done: true })
+})
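The last test fixes the chunk-mapping contract: a groq-sdk delta maps onto the app's { text, done } chunk shape, and a 'stop' finish reason yields an empty final chunk even if the delta still carries content. A sketch that satisfies exactly these assertions; the real method lives in src/services/groq.ts and additionally takes an event callback:

import { ChatCompletionChunk } from 'groq-sdk/lib/chat_completions_ext'

type LlmChunk = { text: string, done: boolean }

// maps a groq-sdk stream chunk onto the app's chunk shape; sketch only
function streamChunkToLlmChunk(chunk: ChatCompletionChunk): LlmChunk {
  const choice = chunk.choices[0]
  const done = choice.finish_reason === 'stop'
  return { text: done ? '' : (choice.delta?.content ?? ''), done }
}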
