-
Notifications
You must be signed in to change notification settings - Fork 210
/
sw.ts
59 lines (46 loc) · 1.79 KB
/
sw.ts
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
import { createOpenAI } from '@ai-sdk/openai'
import { convertToCoreMessages, streamText, ToolInvocation } from 'ai'
import * as kv from 'idb-keyval'
import { getConfigStore, type ModelProvider } from '~/components/model-provider/use-model-provider'
import { convertToCoreTools, maxMessageContext, tools } from '~/lib/tools'
// Shape of a chat message as received from the client in the /api/chat POST body.
type Message = {
role: 'user' | 'assistant'
content: string
// Tool calls made by the assistant, each paired with its result payload.
toolInvocations?: (ToolInvocation & { result: any })[]
}
// This file runs as a service worker, so `self` is the worker global scope.
declare const self: ServiceWorkerGlobalScope
/**
 * Handles a fetch event intercepted by the service worker.
 *
 * POST requests to `/api/chat` are served locally when the user has enabled
 * a custom model provider (stored in IndexedDB): the LLM is called directly
 * from the worker via an OpenAI-compatible adapter and the result is
 * streamed back as a data-stream response. Every other request — including
 * chat requests when no provider is enabled — passes through to the network.
 */
async function handleRequest(event: FetchEvent) {
const { request } = event
const { pathname } = new URL(request.url)

// Only chat API POSTs are intercepted; everything else goes to the network.
if (request.method !== 'POST' || !pathname.startsWith('/api/chat')) {
return fetch(request)
}

const modelProvider = await kv.get<ModelProvider>('modelProvider', getConfigStore())

// No custom provider configured/enabled — fall through to the server route.
if (!modelProvider?.enabled) {
return fetch(request)
}

// OpenAI-compatible adapter pointed at the user-configured endpoint.
const openai = createOpenAI({
baseURL: modelProvider.baseUrl,
apiKey: modelProvider.apiKey,
})

const { messages }: { messages: Message[] } = await request.json()

// Trim the message context sent to the LLM to mitigate token abuse
const recentMessages = messages.slice(-maxMessageContext)
const coreMessages = convertToCoreMessages(recentMessages)
const coreTools = convertToCoreTools(tools)

try {
const result = streamText({
system: modelProvider.system,
model: openai(modelProvider.model),
messages: coreMessages,
tools: coreTools,
})
return result.toDataStreamResponse()
} catch (error) {
return new Response(`Error streaming LLM from service worker: ${error}`, { status: 500 })
}
}
// Route every fetch through the handler so chat requests can be intercepted.
self.addEventListener('fetch', (fetchEvent) => {
fetchEvent.respondWith(handleRequest(fetchEvent))
})