Skip to content

Commit ddbdf71

Browse files
committed
Separate GitHub Copilot from ChatModelManager into a new file
1 parent 452be78 commit ddbdf71

File tree

2 files changed

+109
-107
lines changed

2 files changed

+109
-107
lines changed

src/LLMProviders/chatModelManager.ts

Lines changed: 2 additions & 107 deletions
Original file line numberDiff line numberDiff line change
@@ -12,13 +12,7 @@ import { err2String, isOSeriesModel, safeFetch, withSuppressedTokenWarnings } fr
1212
import { HarmBlockThreshold, HarmCategory } from "@google/generative-ai";
1313
import { ChatAnthropic } from "@langchain/anthropic";
1414
import { ChatCohere } from "@langchain/cohere";
15-
import {
16-
BaseChatModel,
17-
type BaseChatModelParams,
18-
} from "@langchain/core/language_models/chat_models";
19-
import { AIMessage, type BaseMessage, type MessageContent } from "@langchain/core/messages";
20-
import { type ChatResult, ChatGeneration } from "@langchain/core/outputs";
21-
import { type CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
15+
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
2216
import { ChatDeepSeek } from "@langchain/deepseek";
2317
import { ChatGoogleGenerativeAI } from "@langchain/google-genai";
2418
import { ChatGroq } from "@langchain/groq";
@@ -28,111 +22,12 @@ import { ChatOpenAI } from "@langchain/openai";
2822
import { ChatXAI } from "@langchain/xai";
2923
import { Notice } from "obsidian";
3024
import { GitHubCopilotProvider } from "./githubCopilotProvider";
31-
32-
export interface CopilotChatModelParams extends BaseChatModelParams {
33-
provider: GitHubCopilotProvider;
34-
modelName: string;
35-
}
36-
37-
class CopilotChatModel extends BaseChatModel {
38-
lc_serializable = false;
39-
lc_namespace = ["langchain", "chat_models", "copilot"];
40-
private provider: GitHubCopilotProvider;
41-
modelName: string;
42-
43-
constructor(fields: CopilotChatModelParams) {
44-
super(fields);
45-
this.provider = fields.provider;
46-
this.modelName = fields.modelName;
47-
}
48-
49-
_llmType(): string {
50-
return "copilot-chat-model";
51-
}
52-
53-
private _convertMessageType(messageType: string): string {
54-
switch (messageType) {
55-
case "human":
56-
return "user";
57-
case "ai":
58-
return "assistant";
59-
case "system":
60-
return "system";
61-
case "tool":
62-
return "tool";
63-
case "function":
64-
return "function";
65-
case "generic":
66-
default:
67-
return "user";
68-
}
69-
}
70-
71-
async _generate(
72-
messages: BaseMessage[],
73-
options: this["ParsedCallOptions"],
74-
runManager?: CallbackManagerForLLMRun
75-
): Promise<ChatResult> {
76-
const chatMessages = messages.map((m) => ({
77-
role: this._convertMessageType(m._getType()),
78-
content: m.content as string,
79-
}));
80-
81-
const response = await this.provider.sendChatMessage(chatMessages, this.modelName);
82-
const content = response.choices?.[0]?.message?.content || "";
83-
84-
const generation: ChatGeneration = {
85-
text: content,
86-
message: new AIMessage(content),
87-
};
88-
89-
return {
90-
generations: [generation],
91-
llmOutput: {}, // add more details here if needed
92-
};
93-
}
94-
95-
/**
96-
* A simple approximation: ~4 chars per token for English text
97-
* This matches the fallback behavior in ChatModelManager.countTokens
98-
*/
99-
async getNumTokens(content: MessageContent): Promise<number> {
100-
const text = typeof content === "string" ? content : JSON.stringify(content);
101-
if (!text) return 0;
102-
return Math.ceil(text.length / 4);
103-
}
104-
}
25+
import { ChatGitHubCopilot, CopilotChatModel } from "./githubCopilotChatModel";
10526

10627
type ChatConstructorType = {
10728
new (config: any): any;
10829
};
10930

110-
class ChatGitHubCopilot {
111-
private provider: GitHubCopilotProvider;
112-
constructor(config: any) {
113-
this.provider = new GitHubCopilotProvider();
114-
// TODO: Use config for persistent storage, UI callbacks, etc.
115-
}
116-
async send(messages: { role: string; content: string }[], model = "gpt-4") {
117-
return this.provider.sendChatMessage(messages, model);
118-
}
119-
getAuthState() {
120-
return this.provider.getAuthState();
121-
}
122-
async startAuth() {
123-
return this.provider.startDeviceCodeFlow();
124-
}
125-
async pollForAccessToken() {
126-
return this.provider.pollForAccessToken();
127-
}
128-
async fetchCopilotToken() {
129-
return this.provider.fetchCopilotToken();
130-
}
131-
resetAuth() {
132-
this.provider.resetAuth();
133-
}
134-
}
135-
13631
const CHAT_PROVIDER_CONSTRUCTORS = {
13732
[ChatModelProviders.OPENAI]: ChatOpenAI,
13833
[ChatModelProviders.AZURE_OPENAI]: ChatOpenAI,
Lines changed: 107 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,107 @@
1+
import {
2+
BaseChatModel,
3+
type BaseChatModelParams,
4+
} from "@langchain/core/language_models/chat_models";
5+
import { AIMessage, type BaseMessage, type MessageContent } from "@langchain/core/messages";
6+
import { type ChatResult, ChatGeneration } from "@langchain/core/outputs";
7+
import { type CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager";
8+
import { GitHubCopilotProvider } from "./githubCopilotProvider";
9+
10+
/**
 * Constructor parameters for {@link CopilotChatModel}.
 *
 * Extends LangChain's BaseChatModelParams with the two pieces the Copilot
 * backend needs: an authenticated provider and the model identifier to
 * request from the Copilot API.
 */
export interface CopilotChatModelParams extends BaseChatModelParams {
  /** Authenticated GitHub Copilot provider used to send chat requests. */
  provider: GitHubCopilotProvider;
  /** Copilot model identifier passed through to sendChatMessage. */
  modelName: string;
}
14+
15+
export class CopilotChatModel extends BaseChatModel {
16+
lc_serializable = false;
17+
lc_namespace = ["langchain", "chat_models", "copilot"];
18+
private provider: GitHubCopilotProvider;
19+
modelName: string;
20+
21+
constructor(fields: CopilotChatModelParams) {
22+
super(fields);
23+
this.provider = fields.provider;
24+
this.modelName = fields.modelName;
25+
}
26+
27+
_llmType(): string {
28+
return "copilot-chat-model";
29+
}
30+
31+
private _convertMessageType(messageType: string): string {
32+
switch (messageType) {
33+
case "human":
34+
return "user";
35+
case "ai":
36+
return "assistant";
37+
case "system":
38+
return "system";
39+
case "tool":
40+
return "tool";
41+
case "function":
42+
return "function";
43+
case "generic":
44+
default:
45+
return "user";
46+
}
47+
}
48+
49+
async _generate(
50+
messages: BaseMessage[],
51+
options: this["ParsedCallOptions"],
52+
runManager?: CallbackManagerForLLMRun
53+
): Promise<ChatResult> {
54+
const chatMessages = messages.map((m) => ({
55+
role: this._convertMessageType(m._getType()),
56+
content: m.content as string,
57+
}));
58+
59+
const response = await this.provider.sendChatMessage(chatMessages, this.modelName);
60+
const content = response.choices?.[0]?.message?.content || "";
61+
62+
const generation: ChatGeneration = {
63+
text: content,
64+
message: new AIMessage(content),
65+
};
66+
67+
return {
68+
generations: [generation],
69+
llmOutput: {}, // add more details here if needed
70+
};
71+
}
72+
73+
/**
74+
* A simple approximation: ~4 chars per token for English text
75+
* This matches the fallback behavior in ChatModelManager.countTokens
76+
*/
77+
async getNumTokens(content: MessageContent): Promise<number> {
78+
const text = typeof content === "string" ? content : JSON.stringify(content);
79+
if (!text) return 0;
80+
return Math.ceil(text.length / 4);
81+
}
82+
}
83+
84+
/**
 * Thin facade over GitHubCopilotProvider exposing chat and auth operations.
 *
 * Constructed with a config object for interface parity with the other chat
 * model constructors in ChatModelManager; the config is currently unused —
 * NOTE(review): presumably reserved for persistent storage / UI callbacks,
 * matching the TODO that existed before this class was moved here.
 */
export class ChatGitHubCopilot {
  private provider: GitHubCopilotProvider;
  constructor(config: any) {
    this.provider = new GitHubCopilotProvider();
  }
  /** Send a chat completion request; defaults to the "gpt-4" model. */
  async send(messages: { role: string; content: string }[], model = "gpt-4") {
    return this.provider.sendChatMessage(messages, model);
  }
  /** Current authentication state as reported by the provider. */
  getAuthState() {
    return this.provider.getAuthState();
  }
  /** Begin the GitHub device-code OAuth flow. */
  async startAuth() {
    return this.provider.startDeviceCodeFlow();
  }
  /** Poll GitHub for the user access token after startAuth. */
  async pollForAccessToken() {
    return this.provider.pollForAccessToken();
  }
  /** Exchange the access token for a short-lived Copilot API token. */
  async fetchCopilotToken() {
    return this.provider.fetchCopilotToken();
  }
  /** Clear all stored authentication state. */
  resetAuth() {
    this.provider.resetAuth();
  }
}

0 commit comments

Comments
 (0)