Merge main into sweep/fix-lockfile-compatibility
sweep-ai[bot] authored Nov 10, 2023
2 parents 6f7c239 + ced3555 commit e2b6c38
Showing 6 changed files with 194 additions and 98 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -37,3 +37,6 @@ yarn-error.log*
 .vercel
 
 dist/
+
+# vs code
+.vscode/launch.json
2 changes: 1 addition & 1 deletion packages/core/package.json
@@ -11,7 +11,7 @@
     "md-utils-ts": "^2.0.0",
     "mongodb": "^6.2.0",
     "notion-md-crawler": "^0.0.2",
-    "openai": "^4.15.4",
+    "openai": "^4.16.1",
    "papaparse": "^5.4.1",
    "pdf-parse": "^1.1.1",
    "portkey-ai": "^0.1.16",
78 changes: 56 additions & 22 deletions packages/core/src/ChatEngine.ts
@@ -1,8 +1,5 @@
 import { v4 as uuidv4 } from "uuid";
-import { Event } from "./callbacks/CallbackManager";
 import { ChatHistory } from "./ChatHistory";
-import { BaseNodePostprocessor } from "./indices/BaseNodePostprocessor";
-import { ChatMessage, LLM, OpenAI } from "./llm/LLM";
 import { NodeWithScore, TextNode } from "./Node";
 import {
   CondenseQuestionPrompt,
@@ -15,6 +12,9 @@ import { BaseQueryEngine } from "./QueryEngine";
 import { Response } from "./Response";
 import { BaseRetriever } from "./Retriever";
 import { ServiceContext, serviceContextFromDefaults } from "./ServiceContext";
+import { Event } from "./callbacks/CallbackManager";
+import { BaseNodePostprocessor } from "./indices/BaseNodePostprocessor";
+import { ChatMessage, LLM, OpenAI } from "./llm/LLM";
 
 /**
  * A ChatEngine is used to handle back and forth chats between the application and the LLM.
@@ -328,6 +328,17 @@ export class ContextChatEngine implements ChatEngine {
   }
 }
 
+export interface MessageContentDetail {
+  type: "text" | "image_url";
+  text: string;
+  image_url: { url: string };
+}
+
+/**
+ * Extended type for the content of a message that allows for multi-modal messages.
+ */
+export type MessageContent = string | MessageContentDetail[];
+
 /**
  * HistoryChatEngine is a ChatEngine that uses a `ChatHistory` object
  * to keeps track of chat's message history.
@@ -347,38 +358,34 @@ export class HistoryChatEngine {
   async chat<
     T extends boolean | undefined = undefined,
     R = T extends true ? AsyncGenerator<string, void, unknown> : Response,
-  >(message: string, chatHistory: ChatHistory, streaming?: T): Promise<R> {
+  >(
+    message: MessageContent,
+    chatHistory: ChatHistory,
+    streaming?: T,
+  ): Promise<R> {
     //Streaming option
     if (streaming) {
       return this.streamChat(message, chatHistory) as R;
     }
-    const context = await this.contextGenerator?.generate(message);
-    chatHistory.addMessage({
-      content: message,
-      role: "user",
-    });
-    const response = await this.llm.chat(
-      await chatHistory.requestMessages(
-        context ? [context.message] : undefined,
-      ),
+    const requestMessages = await this.prepareRequestMessages(
+      message,
+      chatHistory,
     );
+    const response = await this.llm.chat(requestMessages);
     chatHistory.addMessage(response.message);
     return new Response(response.message.content) as R;
   }
 
   protected async *streamChat(
-    message: string,
+    message: MessageContent,
     chatHistory: ChatHistory,
   ): AsyncGenerator<string, void, unknown> {
-    const context = await this.contextGenerator?.generate(message);
-    chatHistory.addMessage({
-      content: message,
-      role: "user",
-    });
+    const requestMessages = await this.prepareRequestMessages(
+      message,
+      chatHistory,
+    );
     const response_stream = await this.llm.chat(
-      await chatHistory.requestMessages(
-        context ? [context.message] : undefined,
-      ),
+      requestMessages,
       undefined,
       true,
     );
@@ -394,4 +401,31 @@ export class HistoryChatEngine {
     });
     return;
   }
+
+  private async prepareRequestMessages(
+    message: MessageContent,
+    chatHistory: ChatHistory,
+  ) {
+    chatHistory.addMessage({
+      content: message,
+      role: "user",
+    });
+    let requestMessages;
+    let context;
+    if (this.contextGenerator) {
+      if (Array.isArray(message)) {
+        // message is of type MessageContentDetail[] - retrieve just the text parts and concatenate them
+        // so we can pass them to the context generator
+        message = (message as MessageContentDetail[])
+          .filter((c) => c.type === "text")
+          .map((c) => c.text)
+          .join("\n\n");
+      }
+      context = await this.contextGenerator.generate(message);
+    }
+    requestMessages = await chatHistory.requestMessages(
+      context ? [context.message] : undefined,
+    );
+    return requestMessages;
+  }
 }
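
To illustrate the new types in ChatEngine.ts: a minimal, self-contained sketch that builds a multi-modal MessageContent payload and replays the text extraction prepareRequestMessages applies before handing the message to the context generator. The payload values are illustrative only, not part of this commit:

import { MessageContent, MessageContentDetail } from "./ChatEngine";

// The interface as committed makes every field required, so unused
// fields are filled with empty placeholders here.
const message: MessageContent = [
  {
    type: "text",
    text: "What is shown in this image?",
    image_url: { url: "" },
  },
  {
    type: "image_url",
    text: "",
    image_url: { url: "https://example.com/cat.png" },
  },
];

// Mirrors prepareRequestMessages: keep the text parts, drop the image
// parts, and join them so the context generator receives plain text.
const textOnly = (message as MessageContentDetail[])
  .filter((c) => c.type === "text")
  .map((c) => c.text)
  .join("\n\n");

console.log(textOnly); // "What is shown in this image?"

// A HistoryChatEngine instance could then receive the full payload via
// engine.chat(message, chatHistory), with images passed through untouched.
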
2 changes: 1 addition & 1 deletion packages/core/src/callbacks/CallbackManager.ts
@@ -30,7 +30,7 @@ export interface DefaultStreamToken {
     index: number;
     delta: {
       content?: string | null;
-      role?: "user" | "assistant" | "system" | "function";
+      role?: "user" | "assistant" | "system" | "function" | "tool";
     };
     finish_reason: string | null;
   }[];
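
This widening mirrors the OpenAI streaming API, whose chunk deltas can now carry a "tool" role. A small sketch of a stream-token handler that narrows on the widened union; the handler itself is illustrative, not part of this commit, and assumes the interface's array field is named choices as in the full source:

import { DefaultStreamToken } from "./callbacks/CallbackManager";

function handleStreamToken(token: DefaultStreamToken) {
  for (const choice of token.choices) {
    if (choice.delta.role === "tool") {
      // Tool deltas can be routed differently from ordinary assistant text.
      console.log("[tool]", choice.delta.content ?? "");
    } else {
      process.stdout.write(choice.delta.content ?? "");
    }
  }
}
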
27 changes: 17 additions & 10 deletions packages/core/src/llm/LLM.ts
@@ -8,12 +8,13 @@ import {
   StreamCallbackResponse,
 } from "../callbacks/CallbackManager";
 
+import { ChatCompletionMessageParam } from "openai/resources";
 import { LLMOptions } from "portkey-ai";
 import { globalsHelper, Tokenizers } from "../GlobalsHelper";
 import {
-  AnthropicSession,
   ANTHROPIC_AI_PROMPT,
   ANTHROPIC_HUMAN_PROMPT,
+  AnthropicSession,
   getAnthropicSession,
 } from "./anthropic";
 import {
@@ -36,7 +37,7 @@ export type MessageType =
   | "memory";
 
 export interface ChatMessage {
-  content: string;
+  content: any;
   role: MessageType;
 }
 
@@ -253,10 +254,13 @@ export class OpenAI implements LLM {
       model: this.model,
       temperature: this.temperature,
       max_tokens: this.maxTokens,
-      messages: messages.map((message) => ({
-        role: this.mapMessageType(message.role),
-        content: message.content,
-      })),
+      messages: messages.map(
+        (message) =>
+          ({
+            role: this.mapMessageType(message.role),
+            content: message.content,
+          }) as ChatCompletionMessageParam,
+      ),
       top_p: this.topP,
       ...this.additionalChatOptions,
     };
@@ -301,10 +305,13 @@ export class OpenAI implements LLM {
       model: this.model,
       temperature: this.temperature,
       max_tokens: this.maxTokens,
-      messages: messages.map((message) => ({
-        role: this.mapMessageType(message.role),
-        content: message.content,
-      })),
+      messages: messages.map(
+        (message) =>
+          ({
+            role: this.mapMessageType(message.role),
+            content: message.content,
+          }) as ChatCompletionMessageParam,
+      ),
       top_p: this.topP,
       ...this.additionalChatOptions,
     };
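
On the LLM.ts changes: ChatMessage.content is widened to any so that multi-modal MessageContent arrays can flow through, which means the mapped objects no longer structurally satisfy the openai SDK's ChatCompletionMessageParam union on their own, hence the explicit cast. A simplified, self-contained sketch of that mapping; the fallback in mapMessageType is an assumption, the real implementation lives elsewhere in LLM.ts:

import type { ChatCompletionMessageParam } from "openai/resources";

type MessageType =
  | "user"
  | "assistant"
  | "system"
  | "generic"
  | "function"
  | "memory";

interface ChatMessage {
  content: any; // a string, or a multi-modal content array
  role: MessageType;
}

// Assumed mapping: roles the OpenAI API does not accept fall back to "user".
function mapMessageType(
  role: MessageType,
): "user" | "assistant" | "system" | "function" {
  switch (role) {
    case "user":
    case "assistant":
    case "system":
    case "function":
      return role;
    default:
      return "user";
  }
}

function toOpenAIMessages(messages: ChatMessage[]): ChatCompletionMessageParam[] {
  // The cast is needed because `content: any` erases the structural link
  // to the SDK's per-role message types.
  return messages.map(
    (message) =>
      ({
        role: mapMessageType(message.role),
        content: message.content,
      }) as ChatCompletionMessageParam,
  );
}
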