
Commit

Merge pull request #11 from technologiestiftung/feat/system-prompt-via-frontend

feat: system prompt via frontend
Jaszkowic authored Aug 22, 2024
2 parents f8d37dd + fd73ca8 commit b2ad76b
Showing 5 changed files with 28 additions and 19 deletions.
6 changes: 5 additions & 1 deletion src/controllers/chat-controller.ts
@@ -9,6 +9,7 @@ import {
ChatRequest,
ChatResponse,
} from "../types/chat-types";
+import { resolveMessagesForSystemPrompt } from "../fixtures/system-prompt";

export const chatWithLLM = async (
req: Request<{}, {}, ChatRequest>,
@@ -19,7 +20,10 @@ export const chatWithLLM = async (
const { messages } = req.body;
const llmHandler = resolveLlmHandler(llm);

-const llmRespone = await llmHandler.chatCompletion(messages);
+const resolvedMessages = resolveMessagesForSystemPrompt(messages);
+
+const llmRespone = await llmHandler.chatCompletion(resolvedMessages);

if (llmRespone.stream) {
await pipeline(llmRespone.stream, res);
return;
21 changes: 20 additions & 1 deletion src/fixtures/system-prompt.ts
@@ -1,4 +1,23 @@
-export const SYSTEM_PROMPT = `
+import { ChatMessage } from "../types/chat-types";
+
+export const resolveMessagesForSystemPrompt = (
+  messages: ChatMessage[]
+): ChatMessage[] => {
+  const systemPromptMessages = messages.filter(
+    (message) => message.role === "system"
+  );
+
+  // If there are no system prompt messages, add the default system prompt message
+  if (systemPromptMessages.length === 0) {
+    return [{ role: "system", content: DEFAULT_SYSTEM_PROMPT }].concat(
+      messages
+    ) as ChatMessage[];
+  }
+
+  return messages;
+};
+
+const DEFAULT_SYSTEM_PROMPT = `
Sie sind BärGPT, ein virtueller Assistent für die öffentliche Verwaltung in Berlin. Ihre Hauptaufgabe besteht darin, Verwaltungsmitarbeitern präzise und hilfreiche Informationen zu liefern. Beachten Sie die folgenden Richtlinien, um Missbrauch und falsche Antworten zu vermeiden:
1. **Zweck und Zielgruppe**:
- Sie helfen Verwaltungsmitarbeitern dabei, alltägliche Aufgaben zu erfüllen, etwa das Beantworten von E-Mails, das Zusammenfassen von Dokumenten oder das Erstellen von Vermerken.
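
For context, a minimal sketch of how the new helper behaves; the user message and its content below are illustrative assumptions, not part of this commit:

// Hypothetical usage of resolveMessagesForSystemPrompt, not part of the diff:
const withoutSystem: ChatMessage[] = [{ role: "user", content: "Hello" }];
// No "system" message present, so the default system prompt is prepended:
resolveMessagesForSystemPrompt(withoutSystem);
// -> [{ role: "system", content: DEFAULT_SYSTEM_PROMPT }, ...withoutSystem]

const withSystem: ChatMessage[] = [
  { role: "system", content: "Custom system prompt supplied by the frontend" },
  { role: "user", content: "Hello" },
];
// A "system" message is already present, so the messages pass through unchanged:
resolveMessagesForSystemPrompt(withSystem);
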
7 changes: 1 addition & 6 deletions src/llm-handlers/azure-llm-handler.ts
@@ -1,5 +1,4 @@
import { config } from "..";
-import { SYSTEM_PROMPT } from "../fixtures/system-prompt";
import { ChatMessage } from "../types/chat-types";
import { LLMHandler, LLMResponse } from "../types/llm-handler-types";
import { convertWebStreamToNodeStream } from "../utils/stream-utils";
@@ -18,18 +17,14 @@ export class AzureLLMHandler implements LLMHandler {
async chatCompletion(messages: ChatMessage[]): Promise<LLMResponse> {
let endpoint = `${this.endpoint}&api-key=${config.azureLlmApiKey}`;

-const messagesWithSystemPromps = [
-  { role: "system", content: SYSTEM_PROMPT },
-].concat(messages);

try {
const response = await fetch(endpoint, {
method: "POST",
headers: {
"Content-Type": "application/json",
},
body: JSON.stringify({
-messages: messagesWithSystemPromps,
+messages: messages,
temperature: LLM_PARAMETERS.temperature,
stream: LLM_PARAMETERS.stream,
}),
7 changes: 1 addition & 6 deletions src/llm-handlers/ollama-llm-handler.ts
@@ -1,5 +1,4 @@
import { config } from "..";
-import { SYSTEM_PROMPT } from "../fixtures/system-prompt";
import { ChatMessage } from "../types/chat-types";
import { LLMHandler, LLMResponse } from "../types/llm-handler-types";
import { convertWebStreamToNodeStream } from "../utils/stream-utils";
@@ -16,10 +15,6 @@
}

async chatCompletion(messages: ChatMessage[]): Promise<LLMResponse> {
-const messagesWithSystemPromps = [
-  { role: "system", content: SYSTEM_PROMPT },
-].concat(messages);

try {
const response = await fetch(this.endpoint, {
method: "POST",
@@ -28,7 +23,7 @@ export class OllamaLlmHandler implements LLMHandler {
},
body: JSON.stringify({
model: this.model,
-messages: messagesWithSystemPromps,
+messages: messages,
options: { temperature: LLM_PARAMETERS.temperature },
stream: LLM_PARAMETERS.stream,
}),
6 changes: 1 addition & 5 deletions src/llm-handlers/openai-handler.ts
@@ -1,5 +1,4 @@
import { config } from "..";
-import { SYSTEM_PROMPT } from "../fixtures/system-prompt";
import { ChatMessage } from "../types/chat-types";
import { LLMHandler, LLMResponse } from "../types/llm-handler-types";
import { convertWebStreamToNodeStream } from "../utils/stream-utils";
@@ -16,9 +15,6 @@ export class OpenAILLMHandler implements LLMHandler {
}

async chatCompletion(messages: ChatMessage[]): Promise<LLMResponse> {
-const messagesWithSystemPromps = [
-  { role: "system", content: SYSTEM_PROMPT },
-].concat(messages);
try {
// Check if the message contains inappropriate content by using the /moderations endpoint
const moderationsResponse = await fetch(`${this.endpoint}/moderations`, {
@@ -56,7 +52,7 @@
},
body: JSON.stringify({
model: this.model,
-messages: messagesWithSystemPromps,
+messages: messages,
temperature: LLM_PARAMETERS.temperature,
stream: LLM_PARAMETERS.stream,
}),
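
Taken together, the three handlers no longer prepend SYSTEM_PROMPT themselves; the controller now routes the request messages through resolveMessagesForSystemPrompt, so the frontend can send its own system prompt and the backend only falls back to DEFAULT_SYSTEM_PROMPT when none is provided. A rough sketch of such a frontend request follows; the "/chat" route and the surrounding request shape are assumptions, only the messages field appears in this commit:

// Hypothetical frontend call (route and exact request shape assumed):
await fetch("/chat", {
  method: "POST",
  headers: { "Content-Type": "application/json" },
  body: JSON.stringify({
    messages: [
      // Optional system prompt from the frontend; if omitted, the backend
      // prepends DEFAULT_SYSTEM_PROMPT via resolveMessagesForSystemPrompt.
      { role: "system", content: "You are an assistant for ..." },
      { role: "user", content: "Please summarize this document." },
    ],
  }),
});
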

