@@ -12,13 +12,7 @@ import { err2String, isOSeriesModel, safeFetch, withSuppressedTokenWarnings } fr
12
12
import { HarmBlockThreshold , HarmCategory } from "@google/generative-ai" ;
13
13
import { ChatAnthropic } from "@langchain/anthropic" ;
14
14
import { ChatCohere } from "@langchain/cohere" ;
15
- import {
16
- BaseChatModel ,
17
- type BaseChatModelParams ,
18
- } from "@langchain/core/language_models/chat_models" ;
19
- import { AIMessage , type BaseMessage , type MessageContent } from "@langchain/core/messages" ;
20
- import { type ChatResult , ChatGeneration } from "@langchain/core/outputs" ;
21
- import { type CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager" ;
15
+ import { BaseChatModel } from "@langchain/core/language_models/chat_models" ;
22
16
import { ChatDeepSeek } from "@langchain/deepseek" ;
23
17
import { ChatGoogleGenerativeAI } from "@langchain/google-genai" ;
24
18
import { ChatGroq } from "@langchain/groq" ;
@@ -28,111 +22,12 @@ import { ChatOpenAI } from "@langchain/openai";
28
22
import { ChatXAI } from "@langchain/xai" ;
29
23
import { Notice } from "obsidian" ;
30
24
import { GitHubCopilotProvider } from "./githubCopilotProvider" ;
31
-
32
- export interface CopilotChatModelParams extends BaseChatModelParams {
33
- provider : GitHubCopilotProvider ;
34
- modelName : string ;
35
- }
36
-
37
- class CopilotChatModel extends BaseChatModel {
38
- lc_serializable = false ;
39
- lc_namespace = [ "langchain" , "chat_models" , "copilot" ] ;
40
- private provider : GitHubCopilotProvider ;
41
- modelName : string ;
42
-
43
- constructor ( fields : CopilotChatModelParams ) {
44
- super ( fields ) ;
45
- this . provider = fields . provider ;
46
- this . modelName = fields . modelName ;
47
- }
48
-
49
- _llmType ( ) : string {
50
- return "copilot-chat-model" ;
51
- }
52
-
53
- private _convertMessageType ( messageType : string ) : string {
54
- switch ( messageType ) {
55
- case "human" :
56
- return "user" ;
57
- case "ai" :
58
- return "assistant" ;
59
- case "system" :
60
- return "system" ;
61
- case "tool" :
62
- return "tool" ;
63
- case "function" :
64
- return "function" ;
65
- case "generic" :
66
- default :
67
- return "user" ;
68
- }
69
- }
70
-
71
- async _generate (
72
- messages : BaseMessage [ ] ,
73
- options : this[ "ParsedCallOptions" ] ,
74
- runManager ?: CallbackManagerForLLMRun
75
- ) : Promise < ChatResult > {
76
- const chatMessages = messages . map ( ( m ) => ( {
77
- role : this . _convertMessageType ( m . _getType ( ) ) ,
78
- content : m . content as string ,
79
- } ) ) ;
80
-
81
- const response = await this . provider . sendChatMessage ( chatMessages , this . modelName ) ;
82
- const content = response . choices ?. [ 0 ] ?. message ?. content || "" ;
83
-
84
- const generation : ChatGeneration = {
85
- text : content ,
86
- message : new AIMessage ( content ) ,
87
- } ;
88
-
89
- return {
90
- generations : [ generation ] ,
91
- llmOutput : { } , // add more details here if needed
92
- } ;
93
- }
94
-
95
- /**
96
- * A simple approximation: ~4 chars per token for English text
97
- * This matches the fallback behavior in ChatModelManager.countTokens
98
- */
99
- async getNumTokens ( content : MessageContent ) : Promise < number > {
100
- const text = typeof content === "string" ? content : JSON . stringify ( content ) ;
101
- if ( ! text ) return 0 ;
102
- return Math . ceil ( text . length / 4 ) ;
103
- }
104
- }
25
+ import { ChatGitHubCopilot , CopilotChatModel } from "./githubCopilotChatModel" ;
105
26
106
27
/**
 * Any class constructible from a single config object — the common shape
 * of the LangChain chat-model constructors registered below.
 */
type ChatConstructorType = {
  new (config: any): any;
};
109
30
110
- class ChatGitHubCopilot {
111
- private provider : GitHubCopilotProvider ;
112
- constructor ( config : any ) {
113
- this . provider = new GitHubCopilotProvider ( ) ;
114
- // TODO: Use config for persistent storage, UI callbacks, etc.
115
- }
116
- async send ( messages : { role : string ; content : string } [ ] , model = "gpt-4" ) {
117
- return this . provider . sendChatMessage ( messages , model ) ;
118
- }
119
- getAuthState ( ) {
120
- return this . provider . getAuthState ( ) ;
121
- }
122
- async startAuth ( ) {
123
- return this . provider . startDeviceCodeFlow ( ) ;
124
- }
125
- async pollForAccessToken ( ) {
126
- return this . provider . pollForAccessToken ( ) ;
127
- }
128
- async fetchCopilotToken ( ) {
129
- return this . provider . fetchCopilotToken ( ) ;
130
- }
131
- resetAuth ( ) {
132
- this . provider . resetAuth ( ) ;
133
- }
134
- }
135
-
136
31
const CHAT_PROVIDER_CONSTRUCTORS = {
137
32
[ ChatModelProviders . OPENAI ] : ChatOpenAI ,
138
33
[ ChatModelProviders . AZURE_OPENAI ] : ChatOpenAI ,
0 commit comments