Commit deb7a0e

adding support for system level prompt
AkhileshNegi committed Apr 7, 2024
1 parent 26f5903 commit deb7a0e
Showing 2 changed files with 13 additions and 9 deletions.
llm/api.py (11 additions, 7 deletions)

@@ -49,7 +49,10 @@ def create_chat(request):
 
     openai.api_key = organization.openai_key
 
-    prompt = request.data.get("prompt").strip()
+    question = request.data.get("question").strip()
+    system_prompt = request.data.get(
+        "system_prompt", organization.system_prompt
+    ).strip()
     gpt_model = request.data.get("gpt_model", "gpt-3.5-turbo").strip()
     session_id = (request.data.get("session_id") or generate_session_id()).strip()
 
@@ -59,7 +62,7 @@ def create_chat(request):
         messages=[
             {
                 "role": "user",
-                "content": f"Detect the languages in this text: {prompt}",
+                "content": f"Detect the languages in this text: {question}",
             }
         ],
         functions=[
@@ -111,7 +114,7 @@ def create_chat(request):
 
     # 2. Pull relevant chunks from vector database
     prompt_embeddings = openai.Embedding.create(
-        model="text-embedding-ada-002", input=prompt
+        model="text-embedding-ada-002", input=question
     )["data"][0]["embedding"]
 
     embedding_results = (
@@ -146,10 +149,11 @@ def create_chat(request):
     # 3. Fetch the chat history from our message store to send to openai and back in the response
     historical_chats = Message.objects.filter(session_id=session_id).all()
 
-    # 4. Retrievial question and answer (2nd call to OpenAI, use language from 1. to help LLM respond in same language as user question)
+    # 4. Retrieval question and answer (2nd call to OpenAI, use language from 1. to help LLM respond in same language as user question)
     response = openai.ChatCompletion.create(
         model=gpt_model,
         messages=context_prompt_messages(
+            system_prompt,
             organization.id,
             language_results["language"],
             relevant_english_context,
@@ -168,7 +172,7 @@ def create_chat(request):
     evaluator_prompts = organization.evaluator_prompts  # { 'coherence': ... }
     for criteria, evaluator_prompt in evaluator_prompts.items():
         score = evaluate_criteria_score(
-            evaluator_prompt, prompt, prompt_response, gpt_model
+            evaluator_prompt, question, prompt_response, gpt_model
         )
         evaluation_scores[criteria] = score
         logger.info(f"Evaluated criteria: {criteria} with score: {score}")
@@ -182,7 +186,7 @@ def create_chat(request):
     Message.objects.create(
         session_id=session_id,
         role="user",
-        message=prompt,
+        message=question,
         evaluation_score=evaluation_scores,
     )
     Message.objects.create(
@@ -195,7 +199,7 @@ def create_chat(request):
 
     return JsonResponse(
         {
-            "question": prompt,
+            "question": question,
            "answer": prompt_response.content,
            "language_results": language_results,
            "embedding_results_count": len(embedding_results),
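The net effect in llm/api.py: the user's text is now read from the "question" field, and a per-request "system_prompt" can override the organization default. Note that request.data.get("system_prompt", organization.system_prompt) only falls back when the key is absent from the payload; an explicit empty string survives as "" after .strip(). A minimal client sketch follows; the /api/chat/ URL and host are assumptions, since this commit does not touch the routing:

import requests

# Hypothetical endpoint; URL configuration is not part of this commit.
CHAT_URL = "https://example.org/api/chat/"

payload = {
    "question": "What vaccines does a newborn need?",
    # Optional per-request override; if omitted, the server falls back
    # to organization.system_prompt.
    "system_prompt": "You are a concise, friendly health assistant.",
    "gpt_model": "gpt-3.5-turbo",
    # session_id omitted: the server generates one via generate_session_id().
}

response = requests.post(CHAT_URL, json=payload)
data = response.json()
# Response keys as returned by the view's JsonResponse.
print(data["question"], data["answer"], data["language_results"], sep="\n")
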
llm/utils/prompt.py (2 additions, 2 deletions)

@@ -9,6 +9,7 @@
 
 
 def context_prompt_messages(
+    system_prompt: str,
     organization_id: int,
     language: str,
     english_context: str,
@@ -17,10 +18,9 @@ def context_prompt_messages(
 ) -> list[dict]:
     org = Organization.objects.filter(id=organization_id).first()
 
-    org_system_prompt = org.system_prompt
     examples_text = org.examples_text
 
-    system_message_prompt = {"role": "system", "content": org_system_prompt}
+    system_message_prompt = {"role": "system", "content": system_prompt}
     human_message_prompt = {
         "role": "user",
         "content": f"""Use the following pieces of context to answer the question at the end. If you don't know the answer, just say that you don't know, don't try to make up an answer.
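In llm/utils/prompt.py the helper now takes the resolved system prompt as its first argument instead of reading org.system_prompt itself (it still loads the Organization for examples_text, so organization_id remains required). The diff truncates the middle of the signature, so the sketch below only illustrates the resulting message layout; build_prompt_messages and its parameters are hypothetical stand-ins, not the project's API:

def build_prompt_messages(system_prompt: str, context: str, question: str) -> list[dict]:
    """Illustrative stand-in for context_prompt_messages: the system prompt
    is injected by the caller; the rest is assembled as before."""
    system_message = {"role": "system", "content": system_prompt}
    user_message = {
        "role": "user",
        "content": (
            "Use the following pieces of context to answer the question at the end. "
            "If you don't know the answer, just say that you don't know, "
            f"don't try to make up an answer.\n\n{context}\n\nQuestion: {question}"
        ),
    }
    return [system_message, user_message]

# The request-level override (or the organization default) flows straight through:
print(build_prompt_messages(
    "You are a helpful assistant.",
    "Vaccination schedule ...",
    "When is the first dose due?",
))
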
