Skip to content

Commit ca18580

Browse files
committed
fix: improve performance in AI request logging
Instead of appending AI requests line by line to the output channel, we now append them in a single step. Since appending to a channel is a relatively expensive operation, this change significantly improves performance for large requests. This is particularly noticeable when AI inline code completions are enabled for large files, as many requests may be sent during typing.
1 parent 445b35b commit ca18580

File tree

3 files changed

+5
-2
lines changed

3 files changed

+5
-2
lines changed

Diff for: packages/ai-code-completion/src/browser/code-completion-agent.ts

+3
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,9 @@ export class CodeCompletionAgentImpl implements CodeCompletionAgent {
114114
const requestId = generateUuid();
115115
const request: LanguageModelRequest = {
116116
messages: [{ type: 'text', actor: 'user', query: prompt }],
117+
settings: {
118+
stream: false
119+
}
117120
};
118121
if (token.isCancellationRequested) {
119122
return undefined;

Diff for: packages/ai-core/src/browser/frontend-language-model-registry.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -366,7 +366,7 @@ const languageModelOutputHandler = (
366366
'Sending request:'
367367
);
368368
const formattedRequest = formatJsonWithIndentation(args[0]);
369-
formattedRequest.forEach(line => outputChannel.appendLine(line));
369+
outputChannel.append(formattedRequest.join('\n'));
370370
if (args[1]) {
371371
args[1] = new Proxy(args[1], {
372372
get<CK extends keyof CancellationToken>(

Diff for: packages/ai-openai/src/node/openai-language-model.ts

+1-1
Original file line numberDiff line numberDiff line change
@@ -66,7 +66,7 @@ export class OpenAiModel implements LanguageModel {
6666
const settings = this.getSettings(request);
6767
const openai = this.initializeOpenAi();
6868

69-
if (this.isNonStreamingModel(this.model)) {
69+
if (this.isNonStreamingModel(this.model) || (typeof settings.stream === 'boolean' && !settings.stream)) {
7070
return this.handleNonStreamingRequest(openai, request);
7171
}
7272

0 commit comments

Comments (0)