@@ -4,6 +4,7 @@ import { ChatOllama } from "./ChatOllama"
 import { getOpenAIConfigById } from "@/db/openai"
 import { ChatOpenAI } from "@langchain/openai"
 import { urlRewriteRuntime } from "@/libs/runtime"
+import { ChatGoogleAI } from "./ChatGoogleAI"
 
 export const pageAssistModel = async ({
   model,
@@ -30,18 +31,15 @@ export const pageAssistModel = async ({
   numPredict?: number
   useMMap?: boolean
 }) => {
-
   if (model === "chrome::gemini-nano::page-assist") {
     return new ChatChromeAI({
       temperature,
-      topK,
+      topK
     })
   }
 
-
   const isCustom = isCustomModel(model)
 
-
   if (isCustom) {
     const modelInfo = await getModelInfo(model)
     const providerInfo = await getOpenAIConfigById(modelInfo.provider_id)
@@ -50,6 +48,20 @@ export const pageAssistModel = async ({
       await urlRewriteRuntime(providerInfo.baseUrl || "")
     }
 
+    if (providerInfo.provider === "gemini") {
+      return new ChatGoogleAI({
+        modelName: modelInfo.model_id,
+        openAIApiKey: providerInfo.apiKey || "temp",
+        temperature,
+        topP,
+        maxTokens: numPredict,
+        configuration: {
+          apiKey: providerInfo.apiKey || "temp",
+          baseURL: providerInfo.baseUrl || ""
+        }
+      }) as any
+    }
+
     return new ChatOpenAI({
       modelName: modelInfo.model_id,
       openAIApiKey: providerInfo.apiKey || "temp",
@@ -58,13 +70,11 @@ export const pageAssistModel = async ({
       maxTokens: numPredict,
       configuration: {
         apiKey: providerInfo.apiKey || "temp",
-        baseURL: providerInfo.baseUrl || "",
-      },
+        baseURL: providerInfo.baseUrl || ""
+      }
     }) as any
   }
 
-
-
   return new ChatOllama({
     baseUrl,
     keepAlive,
@@ -76,9 +86,6 @@ export const pageAssistModel = async ({
     model,
     numGpu,
     numPredict,
-    useMMap,
+    useMMap
   })
-
-
-
 }
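
Note: the new ./ChatGoogleAI module imported above is not part of this diff. Judging by the ChatOpenAI-style options passed to it (modelName, openAIApiKey, configuration.baseURL), it is plausibly a thin subclass of ChatOpenAI pointed at Gemini's OpenAI-compatible endpoint. The sketch below is an assumption, not the committed implementation: the override and the specific parameters it strips are illustrative only.

// Hypothetical sketch of ./ChatGoogleAI (assumption — the real file is not in this diff).
import { ChatOpenAI } from "@langchain/openai"

export class ChatGoogleAI extends ChatOpenAI {
  static lc_name() {
    return "ChatGoogleAI"
  }

  // Assumption: Gemini's OpenAI-compatible API rejects some OpenAI-only
  // sampling fields, so strip them from the outgoing request parameters
  // while reusing the inherited ChatOpenAI request/streaming logic.
  override invocationParams(options?: this["ParsedCallOptions"]) {
    const params = super.invocationParams(options)
    delete params.frequency_penalty
    delete params.presence_penalty
    delete params.logit_bias
    return params
  }
}

Constructed as in the diff, configuration.baseURL would carry the user-supplied Gemini endpoint, so the provider branch only has to choose the class; everything else flows through the same ChatOpenAI code path as other custom providers.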