From 3c67adbb56a6c94af21fbd625e49670fe11f3d26 Mon Sep 17 00:00:00 2001
From: Nicolas Bonamy
Date: Wed, 15 May 2024 12:15:48 -0500
Subject: [PATCH] more tests

---
 src/services/google.ts           | 106 +++++++++++++++----------------
 tests/unit/engine.test.ts        |  26 +++++++-
 tests/unit/engine_google.test.ts |   4 +-
 tests/unit/engine_groq.test.ts   |  97 ++++++++++++++++++++++++++++
 4 files changed, 177 insertions(+), 56 deletions(-)
 create mode 100644 tests/unit/engine_groq.test.ts

diff --git a/src/services/google.ts b/src/services/google.ts
index c30102e..5d898d8 100644
--- a/src/services/google.ts
+++ b/src/services/google.ts
@@ -173,60 +173,60 @@ export default class extends LlmEngine {
 
     //console.log('[google] chunk:', chunk)
 
-    // tool calls
-    const toolCalls = chunk.functionCalls()
-    if (toolCalls?.length) {
-
-      // save
-      this.toolCalls = toolCalls.map((tc) => {
-        return {
-          id: tc.name,
-          message: '',
-          function: tc.name,
-          args: JSON.stringify(tc.args),
-        }
-      })
-
-      // call
-      for (const toolCall of this.toolCalls) {
-
-        // first notify
-        eventCallback?.call(this, {
-          type: 'tool',
-          content: this.getToolPreparationDescription(toolCall.function)
-        })
-
-        // first notify
-        eventCallback?.call(this, {
-          type: 'tool',
-          content: this.getToolRunningDescription(toolCall.function)
-        })
-
-        // now execute
-        const args = JSON.parse(toolCall.args)
-        const content = await this.callTool(toolCall.function, args)
-        console.log(`[openai] tool call ${toolCall.function} with ${JSON.stringify(args)} => ${JSON.stringify(content).substring(0, 128)}`)
-
-        // send
-        this.currentChat.sendMessageStream([
-          { functionResponse: {
-            name: toolCall.function,
-            response: content
-          }}
-        ])
-
-        // clear
-        eventCallback?.call(this, {
-          type: 'tool',
-          content: null,
-        })
-
-      }
-
-      // done
-      return null
+    // // tool calls
+    // const toolCalls = chunk.functionCalls()
+    // if (toolCalls?.length) {
+
+    //   // save
+    //   this.toolCalls = toolCalls.map((tc) => {
+    //     return {
+    //       id: tc.name,
+    //       message: '',
+    //       function: tc.name,
+    //       args: JSON.stringify(tc.args),
+    //     }
+    //   })
+
+    //   // call
+    //   for (const toolCall of this.toolCalls) {
+
+    //     // first notify
+    //     eventCallback?.call(this, {
+    //       type: 'tool',
+    //       content: this.getToolPreparationDescription(toolCall.function)
+    //     })
+
+    //     // first notify
+    //     eventCallback?.call(this, {
+    //       type: 'tool',
+    //       content: this.getToolRunningDescription(toolCall.function)
+    //     })
+
+    //     // now execute
+    //     const args = JSON.parse(toolCall.args)
+    //     const content = await this.callTool(toolCall.function, args)
+    //     console.log(`[openai] tool call ${toolCall.function} with ${JSON.stringify(args)} => ${JSON.stringify(content).substring(0, 128)}`)
+
+    //     // send
+    //     this.currentChat.sendMessageStream([
+    //       { functionResponse: {
+    //         name: toolCall.function,
+    //         response: content
+    //       }}
+    //     ])
+
+    //     // clear
+    //     eventCallback?.call(this, {
+    //       type: 'tool',
+    //       content: null,
+    //     })
+
+    //   }
+
+    //   // done
+    //   return null
 
-    }
+    // }
 
     // text chunk
     return {
diff --git a/tests/unit/engine.test.ts b/tests/unit/engine.test.ts
index 32e011e..521006d 100644
--- a/tests/unit/engine.test.ts
+++ b/tests/unit/engine.test.ts
@@ -1,6 +1,6 @@
 import { beforeEach, expect, test } from 'vitest'
 
-import { isEngineReady, igniteEngine } from '../../src/services/llm'
+import { isEngineReady, igniteEngine, hasVisionModels, isVisionModel } from '../../src/services/llm'
 import { store } from '../../src/services/store'
 import defaults from '../../defaults/settings.json'
 import OpenAI from '../../src/services/openai'
@@ -22,6 +22,8 @@ test('Default Configuration', () => {
   expect(isEngineReady('mistralai')).toBe(false)
   expect(isEngineReady('anthropic')).toBe(false)
   expect(isEngineReady('google')).toBe(false)
+  expect(isEngineReady('groq')).toBe(false)
+  expect(isEngineReady('aws')).toBe(false)
 })
 
 test('OpenAI Configuration', () => {
@@ -66,6 +68,15 @@ test('Google Configuration', () => {
   expect(isEngineReady('google')).toBe(true)
 })
 
+test('Groq Configuration', () => {
+  store.config.engines.groq.models.image = [model]
+  expect(isEngineReady('groq')).toBe(false)
+  store.config.engines.groq.models.chat = [model]
+  expect(isEngineReady('groq')).toBe(false)
+  store.config.engines.groq.apiKey = '123'
+  expect(isEngineReady('groq')).toBe(true)
+})
+
 test('Ignite Engine', async () => {
   expect(await igniteEngine('openai', store.config)).toBeInstanceOf(OpenAI)
   expect(await igniteEngine('ollama', store.config)).toBeInstanceOf(Ollama)
@@ -73,4 +84,17 @@ test('Ignite Engine', async () => {
   expect(await igniteEngine('anthropic', store.config)).toBeInstanceOf(Anthropic)
   expect(await igniteEngine('google', store.config)).toBeInstanceOf(Google)
   expect(await igniteEngine('groq', store.config)).toBeInstanceOf(Groq)
+  expect(await igniteEngine('aws', store.config)).toBeInstanceOf(OpenAI)
+  expect(await igniteEngine('aws', store.config, 'aws')).toBeNull()
+})
+
+test('Has Vision Models', async () => {
+  expect(hasVisionModels('openai')).toBe(true)
+  expect(hasVisionModels('anthropic')).toBe(false)
+})
+
+test('Is Vision Model', async () => {
+  expect(isVisionModel('openai', 'gpt-3.5')).toBe(false)
+  expect(isVisionModel('openai', 'gpt-4-turbo')).toBe(true)
+  expect(isVisionModel('openai', 'gpt-vision')).toBe(true)
 })
diff --git a/tests/unit/engine_google.test.ts b/tests/unit/engine_google.test.ts
index 418791c..95a6f91 100644
--- a/tests/unit/engine_google.test.ts
+++ b/tests/unit/engine_google.test.ts
@@ -100,12 +100,12 @@ test('Google streamChunkToLlmChunk Text', async () => {
   }
   const llmChunk1 = await google.streamChunkToLlmChunk(streamChunk, null)
   expect(streamChunk.text).toHaveBeenCalled()
-  expect(streamChunk.functionCalls).toHaveBeenCalled()
+  //expect(streamChunk.functionCalls).toHaveBeenCalled()
   expect(llmChunk1).toStrictEqual({ text: 'response', done: false })
   streamChunk.candidates[0].finishReason = 'STOP'
   streamChunk.text = vi.fn(() => '')
   const llmChunk2 = await google.streamChunkToLlmChunk(streamChunk, null)
   expect(streamChunk.text).toHaveBeenCalled()
-  expect(streamChunk.functionCalls).toHaveBeenCalled()
+  //expect(streamChunk.functionCalls).toHaveBeenCalled()
   expect(llmChunk2).toStrictEqual({ text: '', done: true })
 })
diff --git a/tests/unit/engine_groq.test.ts b/tests/unit/engine_groq.test.ts
new file mode 100644
index 0000000..c427a27
--- /dev/null
+++ b/tests/unit/engine_groq.test.ts
@@ -0,0 +1,97 @@
+
+import { vi, beforeEach, expect, test } from 'vitest'
+import { store } from '../../src/services/store'
+import defaults from '../../defaults/settings.json'
+import Message from '../../src/models/message'
+import Groq from '../../src/services/groq'
+import { ChatCompletionChunk } from 'groq-sdk/lib/chat_completions_ext'
+import { loadGroqModels } from '../../src/services/llm'
+import { Model } from '../../src/types/config.d'
+
+vi.mock('groq-sdk', async() => {
+  const Groq = vi.fn()
+  Groq.prototype.apiKey = '123'
+  Groq.prototype.listModels = vi.fn(() => {
+    return { data: [
+      { id: 'model2', name: 'model2' },
+      { id: 'model1', name: 'model1' },
+    ] }
+  })
+  Groq.prototype.chat = {
+    completions: {
+      create: vi.fn((opts) => {
+        if (opts.stream) {
+          return {
+            controller: {
+              abort: vi.fn()
+            }
+          }
+        }
+        else {
+          return { choices: [{ message: { content: 'response' } }] }
+        }
+      })
+    }
+  }
+  return { default : Groq }
+})
+
+beforeEach(() => {
+  store.config = defaults
+  store.config.engines.groq.apiKey = '123'
+})
+
+test('Groq Load Models', async () => {
+  expect(await loadGroqModels()).toBe(true)
+  const models = store.config.engines.groq.models.chat
+  expect(models.map((m: Model) => { return { id: m.id, name: m.name }})).toStrictEqual([
+    { id: 'gemma-7b-it-32768', name: 'Gemma 7b' },
+    { id: 'llama2-70b-4096', name: 'LLaMA2 70b' },
+    { id: 'llama3-70b-8192', name: 'LLaMA3 70b' },
+    { id: 'llama3-8b-8192', name: 'LLaMA3 8b' },
+    { id: 'mixtral-8x7b-32768', name: 'Mixtral 8x7b' },
+  ])
+  expect(store.config.engines.groq.model.chat).toStrictEqual(models[0].id)
+})
+
+test('Groq Basic', async () => {
+  const groq = new Groq(store.config)
+  expect(groq.getName()).toBe('groq')
+  expect(groq.isVisionModel('llama2-70b-4096')).toBe(false)
+  expect(groq.isVisionModel('llama3-70b-8192')).toBe(false)
+  expect(groq.getRountingModel()).toBeNull()
+})
+
+test('Groq completion', async () => {
+  const groq = new Groq(store.config)
+  const response = await groq.complete([
+    new Message('system', 'instruction'),
+    new Message('user', 'prompt'),
+  ], null)
+  expect(response).toStrictEqual({
+    type: 'text',
+    content: 'response'
+  })
+})
+
+test('Groq stream', async () => {
+  const groq = new Groq(store.config)
+  const response = await groq.stream([
+    new Message('system', 'instruction'),
+    new Message('user', 'prompt'),
+  ], null)
+  expect(response.controller).toBeDefined()
+  await groq.stop(response)
+})
+
+test('Groq streamChunkToLlmChunk Text', async () => {
+  const groq = new Groq(store.config)
+  const streamChunk: ChatCompletionChunk = {
+    choices: [{ index: 0, delta: { content: 'response' }, finish_reason: null }],
+  }
+  const llmChunk1 = await groq.streamChunkToLlmChunk(streamChunk, null)
+  expect(llmChunk1).toStrictEqual({ text: 'response', done: false })
+  streamChunk.choices[0].finish_reason = 'stop'
+  const llmChunk2 = await groq.streamChunkToLlmChunk(streamChunk, null)
+  expect(llmChunk2).toStrictEqual({ text: '', done: true })
+})
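
A note on the new 'Groq Configuration' test: it pins down the readiness rule
for the engine: populating image models alone leaves it not ready, chat models
alone are still not enough, and readiness only flips once the API key is set
as well. Below is a minimal sketch of a check consistent with those
assertions; it is inferred from the test expectations, not the actual
isEngineReady in src/services/llm.ts, and the EngineConfig shape is an
assumption mirroring what the test manipulates:

  // hypothetical shape, mirroring store.config.engines.groq as used in the test
  interface EngineConfig {
    apiKey?: string
    models: { chat: unknown[], image: unknown[] }
  }

  // ready only when chat models are configured AND an API key is set;
  // image models do not count toward readiness
  function isEngineReady(engine: EngineConfig): boolean {
    return Boolean(engine.apiKey) && engine.models.chat.length > 0
  }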
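
Similarly, the closing 'Groq streamChunkToLlmChunk Text' test fixes the
contract for mapping a groq-sdk stream chunk to the app's chunk format: delta
content passes through while finish_reason is null, and once finish_reason is
'stop' the chunk maps to empty text with done set, even though the mock chunk
still carries delta content at that point. A minimal sketch satisfying those
assertions; it is inferred from the expectations, not the actual
implementation in src/services/groq.ts, and the LlmChunk shape is assumed from
the objects asserted in the test:

  import { ChatCompletionChunk } from 'groq-sdk/lib/chat_completions_ext'

  // assumed shape, based on the objects asserted in the test
  interface LlmChunk {
    text: string
    done: boolean
  }

  function streamChunkToLlmChunk(chunk: ChatCompletionChunk): LlmChunk {
    // the test asserts empty text for the 'stop' chunk, so drop any delta
    // content once the stream reports completion
    const done = chunk.choices[0].finish_reason === 'stop'
    return {
      text: done ? '' : (chunk.choices[0].delta?.content ?? ''),
      done,
    }
  }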