From deb7a0e36c47706c9a84c1ae6e245dc12b61c2c9 Mon Sep 17 00:00:00 2001 From: Akhilesh Negi Date: Sun, 7 Apr 2024 18:59:52 +0530 Subject: [PATCH 1/2] adding support for system level prompt --- llm/api.py | 18 +++++++++++------- llm/utils/prompt.py | 4 ++-- 2 files changed, 13 insertions(+), 9 deletions(-) diff --git a/llm/api.py b/llm/api.py index 767535c..e8d07af 100644 --- a/llm/api.py +++ b/llm/api.py @@ -49,7 +49,10 @@ def create_chat(request): openai.api_key = organization.openai_key - prompt = request.data.get("prompt").strip() + question = request.data.get("question").strip() + system_prompt = request.data.get( + "system_prompt", organization.system_prompt + ).strip() gpt_model = request.data.get("gpt_model", "gpt-3.5-turbo").strip() session_id = (request.data.get("session_id") or generate_session_id()).strip() @@ -59,7 +62,7 @@ def create_chat(request): messages=[ { "role": "user", - "content": f"Detect the languages in this text: {prompt}", + "content": f"Detect the languages in this text: {question}", } ], functions=[ @@ -111,7 +114,7 @@ def create_chat(request): # 2. Pull relevant chunks from vector database prompt_embeddings = openai.Embedding.create( - model="text-embedding-ada-002", input=prompt + model="text-embedding-ada-002", input=question )["data"][0]["embedding"] embedding_results = ( @@ -146,10 +149,11 @@ def create_chat(request): # 3. Fetch the chat history from our message store to send to openai and back in the response historical_chats = Message.objects.filter(session_id=session_id).all() - # 4. Retrievial question and answer (2nd call to OpenAI, use language from 1. to help LLM respond in same language as user question) + # 4. Retrieval question and answer (2nd call to OpenAI, use language from 1. 
to help LLM respond in same language as user question) response = openai.ChatCompletion.create( model=gpt_model, messages=context_prompt_messages( + system_prompt, organization.id, language_results["language"], relevant_english_context, @@ -168,7 +172,7 @@ def create_chat(request): evaluator_prompts = organization.evaluator_prompts # { 'coherence': ... } for criteria, evaluator_prompt in evaluator_prompts.items(): score = evaluate_criteria_score( - evaluator_prompt, prompt, prompt_response, gpt_model + evaluator_prompt, question, prompt_response, gpt_model ) evaluation_scores[criteria] = score logger.info(f"Evaluated criteria: {criteria} with score: {score}") @@ -182,7 +186,7 @@ def create_chat(request): Message.objects.create( session_id=session_id, role="user", - message=prompt, + message=question, evaluation_score=evaluation_scores, ) Message.objects.create( @@ -195,7 +199,7 @@ def create_chat(request): return JsonResponse( { - "question": prompt, + "question": question, "answer": prompt_response.content, "language_results": language_results, "embedding_results_count": len(embedding_results), diff --git a/llm/utils/prompt.py b/llm/utils/prompt.py index eab53af..7ec47d1 100644 --- a/llm/utils/prompt.py +++ b/llm/utils/prompt.py @@ -9,6 +9,7 @@ def context_prompt_messages( + system_prompt: str, organization_id: int, language: str, english_context: str, @@ -17,10 +18,9 @@ def context_prompt_messages( ) -> list[dict]: org = Organization.objects.filter(id=organization_id).first() - org_system_prompt = org.system_prompt examples_text = org.examples_text - system_message_prompt = {"role": "system", "content": org_system_prompt} + system_message_prompt = {"role": "system", "content": system_prompt} human_message_prompt = { "role": "user", "content": f"""Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer. 
From f7193c7a0f4738773624fb903a6f689fe202ea04 Mon Sep 17 00:00:00 2001 From: Ishankoradia Date: Tue, 16 Apr 2024 11:38:42 +0530 Subject: [PATCH 2/2] Fall back to organization system prompt when request omits it --- llm/api.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/llm/api.py b/llm/api.py index e8d07af..e64a750 100644 --- a/llm/api.py +++ b/llm/api.py @@ -50,9 +50,12 @@ def create_chat(request): openai.api_key = organization.openai_key question = request.data.get("question").strip() - system_prompt = request.data.get( - "system_prompt", organization.system_prompt - ).strip() + system_prompt = ( + request.data.get("system_prompt", None) or organization.system_prompt + ) + system_prompt = system_prompt.strip() if system_prompt else None + logger.info(f"Using the system prompt : {system_prompt}") + gpt_model = request.data.get("gpt_model", "gpt-3.5-turbo").strip() session_id = (request.data.get("session_id") or generate_session_id()).strip()