Skip to content

Commit

Permalink
Merge pull request #46 from jhakulin/main
Browse files Browse the repository at this point in the history
o1-assistant configuration
  • Loading branch information
jhakulin authored Sep 21, 2024
2 parents b4bdc9d + d3d76f3 commit 7e4ec6f
Show file tree
Hide file tree
Showing 6 changed files with 259 additions and 3 deletions.
52 changes: 52 additions & 0 deletions config/o1_assistant_assistant_config.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,52 @@
name: o1_assistant
instructions: |-
### Pre-requisites for processing
- You will get user input in the form of a question or prompt.
- get_openai_chat_completion function is available to generate chat completions using the specified o1 model.
### Requirements
1. For processing the user input, you shall first form the prompt for the LLM model.
2. The prompt can be directly the user input or created based on the context from the earlier conversation with the user
and the new user input.
3. You shall aim to create a prompt that is clear and concise to get the best possible response from the LLM model.
4. Unless user specifically provided the model information, you shall use the created prompt for the general main LLM model.
5. Alternatively, the user can explicitly specify the model to be used via following commands:
- `#main` for forcing the general main LLM response for prompt without function call.
- `#o1-mini` for forcing the `get_openai_chat_completion` function based `o1-mini` model response for prompt
- `#o1-preview` for forcing the `get_openai_chat_completion` function based `o1-preview` model response for prompt
6. If user provided image as input, you shall convert the image to text and use the text as prompt for LLM model.
model: gpt-4o
assistant_id:
file_references: []
tool_resources:
code_interpreter:
files: {}
file_search:
vector_stores: []
functions:
- type: function
function:
name: get_openai_chat_completion
module: azure.ai.assistant.functions.llm_functions
description: Generates a chat completion for the given prompt using the specified model.
parameters:
type: object
properties:
prompt:
type: string
description: The prompt for which the chat completion is to be generated.
model:
type: string
description: The model to use for generating the chat completion.
required:
- prompt
- model
file_search: false
code_interpreter: false
output_folder_path: ''
ai_client_type: OPEN_AI
assistant_type: assistant
completion_settings: null
assistant_role: user
config_folder: null
50 changes: 50 additions & 0 deletions config/system_function_specs.json
Original file line number Diff line number Diff line change
Expand Up @@ -234,5 +234,55 @@
]
}
}
},
{
"type": "function",
"function": {
"name": "get_openai_chat_completion",
"module": "azure.ai.assistant.functions.llm_functions",
"description": "Generates a chat completion for the given prompt using the prompt and specified model.",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "The prompt for which the chat completion is to be generated."
},
"model": {
"type": "string",
"description": "The model to use for generating the chat completion."
}
},
"required": [
"prompt",
"model"
]
}
}
},
{
"type": "function",
"function": {
"name": "get_azure_openai_chat_completion",
"module": "azure.ai.assistant.functions.llm_functions",
"description": "Generates a chat completion for the given prompt using the prompt and specified model.",
"parameters": {
"type": "object",
"properties": {
"prompt": {
"type": "string",
"description": "The prompt for which the chat completion is to be generated."
},
"model": {
"type": "string",
"description": "The model to use for generating the chat completion."
}
},
"required": [
"prompt",
"model"
]
}
}
}
]
2 changes: 1 addition & 1 deletion requirements.txt
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
# Assistant middleware from GitHub Release
https://github.com/Azure-Samples/azureai-assistant-tool/releases/download/v0.4.4-alpha/azure_ai_assistant-0.4.4a1-py3-none-any.whl
https://github.com/Azure-Samples/azureai-assistant-tool/releases/download/v0.4.5-alpha/azure_ai_assistant-0.4.5a1-py3-none-any.whl

# GUI Framework
PySide6
Expand Down
2 changes: 1 addition & 1 deletion sdk/azure-ai-assistant/azure/ai/assistant/_version.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------

VERSION = "0.4.4a1"
VERSION = "0.4.5a1"
147 changes: 147 additions & 0 deletions sdk/azure-ai-assistant/azure/ai/assistant/functions/llm_functions.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,147 @@
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root for full license information.

from azure.ai.assistant.management.ai_client_factory import AIClientFactory, AIClientType
from azure.ai.assistant.management.conversation_thread_client import ConversationThreadClient
from azure.ai.assistant.management.message import ConversationMessage
from azure.ai.assistant.management.logger_module import logger

from typing import Dict, Any, List
import json, copy


def _initialize_clients(client_type):
    """Build the AI client and conversation-thread client for *client_type*.

    :param client_type: An AIClientType value selecting the backend.
    :return: (ai_client, thread_client) on success, (None, None) on any
        initialization failure (the error is logged).
    """
    try:
        factory = AIClientFactory.get_instance()
        return (
            factory.get_client(client_type),
            ConversationThreadClient.get_instance(client_type),
        )
    except Exception as e:
        logger.exception(f"Failed to initialize AI or thread client: {str(e)}")
        return None, None


def _retrieve_and_parse_conversation(thread_client):
    """Fetch the current thread's recent text messages as chat-completion dicts.

    Looks up the active thread, retrieves at most its last 10 text messages,
    and converts them with _parse_text_messages.

    :param thread_client: A ConversationThreadClient instance.
    :return: List of parsed message dicts, or None on any failure
        (the error is logged).
    """
    try:
        thread_config = thread_client.get_config()
        thread_id = thread_config.get_current_thread_id()
        logger.info(f"retrieve_and_parse_conversation, thread_id: {thread_id}")

        # Retrieve max 10 last text messages from the conversation
        conversation = thread_client.retrieve_conversation(
            thread_name=thread_config.get_thread_name_by_id(thread_id),
            max_text_messages=10,
        )
        return _parse_text_messages(conversation.messages)
    except Exception as e:
        logger.exception(f"Failed to retrieve or parse conversation: {str(e)}")
        return None


def _generate_chat_completion(ai_client, model, messages):
    """Call the chat-completions API and wrap the outcome as a JSON string.

    :param ai_client: Client exposing ``chat.completions.create``.
    :param model: Model name to use for the completion.
    :param messages: Chat-completion message dicts to send.
    :return: JSON string ``{"result": <content>}`` on success ("No response"
        when the API yields no message), or ``{"function_error": <details>}``
        on failure.
    """
    logger.info(f"generate_chat_completion, messages: {messages}")
    logger.info(f"generate_chat_completion, model: {model}")

    try:
        response = ai_client.chat.completions.create(model=model, messages=messages)
        logger.info(f"generate_chat_completion, response: {response}")

        # Default when the API returns no usable choice/message.
        message_content = "No response"
        if response.choices and response.choices[0].message:
            message_content = response.choices[0].message.content

        return json.dumps({"result": message_content})
    except Exception as e:
        error_message = f"Failed to generate chat completion: {str(e)}"
        logger.exception(error_message)
        return json.dumps({"function_error": error_message})


def _update_messages_with_prompt(messages : List[ConversationMessage], prompt):
updated_messages = copy.deepcopy(messages)

new_message = {
"role": "user",
"content": [{"type": "text", "text": prompt}]
}

if not updated_messages:
updated_messages.append(new_message)
return updated_messages

for message in reversed(updated_messages):
if message.get("role") == "user":
message["content"] = new_message["content"]
return updated_messages

# If no user message is found, append the new user message
updated_messages.append(new_message)

return updated_messages


def _parse_text_messages(messages: List['ConversationMessage']) -> List[Dict[str, Any]]:
parsed_messages = []
for message in reversed(messages):
if message.text_message:
parsed_messages.append({
"role": message.role,
"content": [{"type": "text", "text": message.text_message.content}]
})
return parsed_messages


def get_openai_chat_completion(prompt: str, model: str) -> str:
    """
    Generates a chat completion for the given prompt using the specified model.
    :param prompt: The prompt for which the chat completion is to be generated.
    :type prompt: str
    :param model: The model to be used for generating the chat completion.
    :type model: str
    :return: JSON formatted string containing the result or an error message.
    :rtype: str
    """
    ai_client, thread_client = _initialize_clients(AIClientType.OPEN_AI)
    if not (ai_client and thread_client):
        return json.dumps({"function_error": "Failed to initialize AI or thread client."})

    messages = _retrieve_and_parse_conversation(thread_client)
    if messages is None:
        return json.dumps({"function_error": "Failed to retrieve or parse conversation."})

    # Inject the prompt as the latest user turn, then ask the model.
    return _generate_chat_completion(
        ai_client, model, _update_messages_with_prompt(messages, prompt)
    )


def get_azure_openai_chat_completion(prompt: str, model: str) -> str:
    """
    Generates a chat completion for the given prompt using the specified Azure OpenAI model.
    :param prompt: The prompt for which the chat completion is to be generated.
    :type prompt: str
    :param model: The Azure OpenAI model to be used for generating the chat completion.
    :type model: str
    :return: JSON formatted string containing the result or an error message.
    :rtype: str
    """
    ai_client, thread_client = _initialize_clients(AIClientType.AZURE_OPEN_AI)
    if not (ai_client and thread_client):
        return json.dumps({"function_error": "Failed to initialize Azure AI or thread client."})

    messages = _retrieve_and_parse_conversation(thread_client)
    if messages is None:
        return json.dumps({"function_error": "Failed to retrieve or parse conversation."})

    # Inject the prompt as the latest user turn, then ask the model.
    return _generate_chat_completion(
        ai_client, model, _update_messages_with_prompt(messages, prompt)
    )
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,11 @@
find_files_by_extension_in_directory,
)

from azure.ai.assistant.functions.llm_functions import (
get_openai_chat_completion,
get_azure_openai_chat_completion,
)

# Statically defined system functions for fast reference
system_functions = {
"fetch_current_datetime": fetch_current_datetime,
Expand All @@ -30,5 +35,7 @@
"find_all_folders_by_name_from_current_directory": find_all_folders_by_name_from_current_directory,
"retrieve_current_directory_structure_subfolders": retrieve_current_directory_structure_subfolders,
"find_files_by_name_in_directory": find_files_by_name_in_directory,
"find_files_by_extension_in_directory": find_files_by_extension_in_directory
"find_files_by_extension_in_directory": find_files_by_extension_in_directory,
"get_openai_chat_completion": get_openai_chat_completion,
"get_azure_openai_chat_completion": get_azure_openai_chat_completion,
}

0 comments on commit 7e4ec6f

Please sign in to comment.