From 48357559f96dd71d3cedeba2db348adbb58d21b6 Mon Sep 17 00:00:00 2001 From: ph-ausseil Date: Fri, 23 Feb 2024 17:12:53 +0100 Subject: [PATCH 1/7] tests --- AFAAS/core/agents/prompt_manager.py | 196 ++++++++++ .../interfaces/adapters/chatmodel/__init__.py | 26 ++ .../adapters/chatmodel/chatmessage.py | 87 +++++ .../adapters/chatmodel/chatmodel.py | 249 +++++++++++++ .../interfaces/adapters/chatmodel/wrapper.py | 223 ++++++++++++ AFAAS/interfaces/adapters/configuration.py | 57 +++ AFAAS/interfaces/adapters/language_model.py | 149 ++++++++ AFAAS/interfaces/prompts/strategy.py | 336 ++++++++++++++++++ 8 files changed, 1323 insertions(+) create mode 100644 AFAAS/core/agents/prompt_manager.py create mode 100644 AFAAS/interfaces/adapters/chatmodel/__init__.py create mode 100644 AFAAS/interfaces/adapters/chatmodel/chatmessage.py create mode 100644 AFAAS/interfaces/adapters/chatmodel/chatmodel.py create mode 100644 AFAAS/interfaces/adapters/chatmodel/wrapper.py create mode 100644 AFAAS/interfaces/adapters/configuration.py create mode 100644 AFAAS/interfaces/adapters/language_model.py create mode 100644 AFAAS/interfaces/prompts/strategy.py diff --git a/AFAAS/core/agents/prompt_manager.py b/AFAAS/core/agents/prompt_manager.py new file mode 100644 index 00000000000..5fdbb870563 --- /dev/null +++ b/AFAAS/core/agents/prompt_manager.py @@ -0,0 +1,196 @@ +from __future__ import annotations + +import platform +import time +import AFAAS.prompts.common as common_module +from pydantic import BaseModel, ConfigDict +from typing import TYPE_CHECKING, Any + +from AFAAS.interfaces.agent.assistants.prompt_manager import AbstractPromptManager , LLMConfig + +from AFAAS.prompts import BaseTaskRagStrategy, load_all_strategies + +from AFAAS.interfaces.adapters.language_model import AbstractPromptConfiguration +from AFAAS.interfaces.adapters.chatmodel.chatmodel import ChatPrompt +from AFAAS.interfaces.agent.features.agentmixin import AgentMixin +from AFAAS.interfaces.prompts.strategy import AbstractPromptStrategy + +if TYPE_CHECKING: + from AFAAS.interfaces.prompts.strategy import ( + AbstractPromptStrategy) + from AFAAS.interfaces.agent.main import BaseAgent + +from AFAAS.interfaces.adapters.chatmodel import ( + AbstractChatModelProvider, + AbstractChatModelResponse, +) +from AFAAS.interfaces.adapters.chatmodel.wrapper import ChatCompletionKwargs, ChatModelWrapper +from AFAAS.lib.sdk.logger import AFAASLogger +from AFAAS.core.adapters.openai.chatmodel import AFAASChatOpenAI +LOG = AFAASLogger(name=__name__) + + +# FIXME: Find somewhere more appropriate +class SystemInfo(dict): + os_info: str + # provider : OpenAIProvider + api_budget: float + current_time: str + +class BasePromptManager(AgentMixin, AbstractPromptManager): + + def __init__( + self, + config : LLMConfig = LLMConfig( + default = AFAASChatOpenAI(), + cheap = AFAASChatOpenAI(), + long_context = AFAASChatOpenAI(), + code_expert = AFAASChatOpenAI(), + ), + ) -> None: + self._prompt_strategies = {} + AgentMixin.__init__(self= self) + AbstractPromptManager.__init__(self = self, config=config) + + def add_strategies(self, strategies : list[AbstractPromptStrategy])->None : + for strategy in strategies: + self._prompt_strategies[strategy.STRATEGY_NAME] = strategy + + def add_strategy(self, strategy: AbstractPromptStrategy) -> None: + self._prompt_strategies[strategy.STRATEGY_NAME] = strategy + + def get_strategy(self, strategy_name: str) -> AbstractPromptStrategy: + return self._prompt_strategies[strategy_name] + + def set_agent(self, agent: "BaseAgent"): + if not 
hasattr(self, "_agent") or self._agent is None: + super().set_agent(agent) + self.load_strategies() + + def load_strategies(self) -> list[AbstractPromptStrategy]: + + # Dynamically load strategies from AFAAS.prompts.common + for attribute_name in dir(common_module): + attribute = getattr(common_module, attribute_name) + if isinstance(attribute, type) and issubclass(attribute, AbstractPromptStrategy) and attribute not in (AbstractPromptStrategy, BaseTaskRagStrategy): + self.load_strategy(strategy_module_attr=attribute) + + # TODO : This part should be migrated + # 1. Each Tool is in a Folder + # 2. Each Folder has prompts + # 3. Start migration with refine_user_context + strategies: list[AbstractPromptStrategy] = [] + strategies = load_all_strategies() + for strategy in strategies: + self.add_strategy(strategy=strategy) + + return self._prompt_strategies + + def load_strategy(self, strategy_module_attr : type) : + if isinstance(strategy_module_attr, type) and issubclass(strategy_module_attr, AbstractPromptStrategy) and strategy_module_attr not in (AbstractPromptStrategy, BaseTaskRagStrategy): + self.add_strategy( strategy_module_attr(**strategy_module_attr.default_configuration.model_dump())) + + async def execute_strategy(self, strategy_name: str, **kwargs) -> AbstractChatModelResponse: + if strategy_name not in self._prompt_strategies: + raise ValueError(f"Invalid strategy name {strategy_name}") + + prompt_strategy: AbstractPromptStrategy = self.get_strategy(strategy_name=strategy_name ) + if not hasattr(prompt_strategy, "_agent") or prompt_strategy._agent is None: + prompt_strategy.set_agent(agent=self._agent) + + kwargs.update(self.get_system_info(strategy = prompt_strategy)) + + LOG.trace( + f"Executing strategy : {prompt_strategy.STRATEGY_NAME}" + ) + + prompt_strategy.set_tools(**kwargs) + + return await self.send_to_chatmodel(prompt_strategy, **kwargs) + + async def send(self, prompt_strategy : AbstractPromptStrategy, **kwargs): + llm_provider = prompt_strategy.get_llm_provider() + if (isinstance(llm_provider, AbstractChatModelProvider)): + return await self.send_to_chatmodel(prompt_strategy, **kwargs) + else : + return await self.send_to_languagemodel(prompt_strategy, **kwargs) + + async def send_to_languagemodel( + self, + prompt_strategy: AbstractPromptStrategy, + **kwargs, + ) : + raise NotImplementedError("Language Model not implemented") + + async def send_to_chatmodel( + self, + prompt_strategy: AbstractPromptStrategy, + **kwargs, + ) -> AbstractChatModelResponse: + + # Get the Provider : Gemini, OpenAI, Llama, ... + provider : AbstractChatModelProvider = prompt_strategy.get_llm_provider() + + # Get the Prompt Configuration : Model version (eg: gpt-3.5, gpt-4...), temperature, top_k + model_configuration : AbstractPromptConfiguration = prompt_strategy.get_prompt_config() + if not isinstance( model_configuration , AbstractPromptConfiguration): + LOG.error(f"{prompt_strategy.__class__.__name__}.get_prompt_config() does not have a valid model configuration, type AbstractPromptConfiguration expected. 
Using default configuration.") + provider = self.config.default + model_configuration = AbstractPromptConfiguration( + llm_model_name= self.config.default.__llmmodel_default__(), + temperature= self.config.default_temperature + ) + + model_configuration_dict = model_configuration.model_dump() + LOG.trace(f"Using model configuration: {model_configuration_dict}") + + # FIXME : Check if Removable + template_kwargs = self.get_system_info(strategy = prompt_strategy) + + + template_kwargs.update(kwargs) + template_kwargs.update(model_configuration_dict) + + prompt : ChatPrompt = await prompt_strategy.build_message(**template_kwargs) + + completion_kwargs = ChatCompletionKwargs( + tool_choice=prompt.tool_choice, + default_tool_choice=prompt.default_tool_choice, + tools=prompt.tools, + llm_model_name= model_configuration_dict.pop("llm_model_name", None), + completion_parser=prompt_strategy.parse_response_content, + ) + llm_wrapper = ChatModelWrapper(llm_model=provider) + + response: AbstractChatModelResponse = await llm_wrapper.create_chat_completion( + chat_messages = prompt.messages, + completion_kwargs = completion_kwargs, + completion_parser = prompt_strategy.parse_response_content, + **model_configuration_dict, #NOTE: May be remove the kwarg argument + ) + + response.chat_messages = prompt.messages + response.system_prompt = prompt.messages[0].content + return response + + def get_system_info(self, strategy: AbstractPromptStrategy) -> SystemInfo: + provider = strategy.get_llm_provider() + template_kwargs = { + "os_info": self.get_os_info(), + "api_budget": provider.get_remaining_budget(), + "current_time": time.strftime("%c"), + } + return template_kwargs + + @staticmethod + def get_os_info() -> str: + + os_name = platform.system() + if os_name != "Linux" : + return platform.platform(terse=True) + else : + import distro + return distro.name(pretty=True) + + def __repr__(self) -> str | tuple[Any, ...]: + return f"{__class__.__name__}():\nAgent:{self._agent.agent_id}\nStrategies:{self._prompt_strategies}" diff --git a/AFAAS/interfaces/adapters/chatmodel/__init__.py b/AFAAS/interfaces/adapters/chatmodel/__init__.py new file mode 100644 index 00000000000..dd5f4753bfe --- /dev/null +++ b/AFAAS/interfaces/adapters/chatmodel/__init__.py @@ -0,0 +1,26 @@ +from __future__ import annotations +from AFAAS.interfaces.adapters.chatmodel.chatmodel import ( + AbstractChatModelProvider, + AbstractChatModelResponse, + ChatModelInfo, + ChatPrompt, + CompletionModelFunction, +) + + +from AFAAS.interfaces.adapters.chatmodel.chatmessage import ( + AbstractChatMessage, + AIMessage, + ChatMessage, + FunctionMessage, + HumanMessage, + Role, + SystemMessage, + AssistantChatMessage, + AssistantFunctionCall, +) + +from AFAAS.interfaces.adapters.chatmodel.wrapper import ( + ChatCompletionKwargs, + ChatModelWrapper, +) diff --git a/AFAAS/interfaces/adapters/chatmodel/chatmessage.py b/AFAAS/interfaces/adapters/chatmodel/chatmessage.py new file mode 100644 index 00000000000..7e271d88a0d --- /dev/null +++ b/AFAAS/interfaces/adapters/chatmodel/chatmessage.py @@ -0,0 +1,87 @@ +from __future__ import annotations +import abc +import enum +from typing import ClassVar, Optional, Literal +from pydantic import BaseModel +from AFAAS.lib.sdk.logger import AFAASLogger +from langchain_core.messages import ChatMessage, HumanMessage, SystemMessage, AIMessage, FunctionMessage + +LOG = AFAASLogger(name=__name__) + +class AbstractRoleLabels(abc.ABC, BaseModel): + USER: str + SYSTEM: str + ASSISTANT: str + + FUNCTION: Optional[str] = None + 
"""May be used for the return value of function calls""" + +class Role(str, enum.Enum): + USER = "user" + SYSTEM = "system" + ASSISTANT = "assistant" + + FUNCTION = "function" + +class AbstractChatMessage(abc.ABC, BaseModel): + _role_labels: ClassVar[AbstractRoleLabels] + role: str + content: str + + @classmethod + def assistant(cls, content: str) -> "AbstractChatMessage": + return cls(role=cls._role_labels.ASSISTANT, content=content) + + @classmethod + def user(cls, content: str) -> "AbstractChatMessage": + return cls(role=cls._role_labels.USER, content=content) + + @classmethod + def system(cls, content: str) -> "AbstractChatMessage": + return cls(role=cls._role_labels.SYSTEM, content=content) + + + def to_langchain(self) -> ChatMessage: + if self.role == self._role_labels.ASSISTANT: + return AIMessage(content=self.content) + elif self.role == self._role_labels.USER: + return HumanMessage(content=self.content) + elif self.role == self._role_labels.SYSTEM: + return SystemMessage(content=self.content) + elif self.role == self._role_labels.FUNCTION: + return FunctionMessage(content=self.content) + else: + raise ValueError(f"Unknown role: {self.role}") + + @classmethod + def from_langchain(cls, message: ChatMessage) -> "AbstractChatMessage": + if isinstance(message, AIMessage): + return cls.assistant(content=message.content) + elif isinstance(message, HumanMessage): + return cls.user(content=message.content) + elif isinstance(message, SystemMessage): + return cls.system(content=message.content) + elif isinstance(message, FunctionMessage): + return cls.system(content=message.content) + else: + raise ValueError(f"Unknown message type: {type(message)}") + + def model_dump(self, **kwargs): + d = super().model_dump(**kwargs) + d["role"] = self.role + return d + +class AssistantFunctionCall(BaseModel): + name: str + arguments: str + +class AssistantToolCall(BaseModel): + # id: str + type: Literal["function"] + function: AssistantFunctionCall + +class AssistantChatMessage(AbstractChatMessage): + + role: Role = Role.ASSISTANT + content: Optional[str] = None + tool_calls: Optional[list[AssistantToolCall]] = None diff --git a/AFAAS/interfaces/adapters/chatmodel/chatmodel.py b/AFAAS/interfaces/adapters/chatmodel/chatmodel.py new file mode 100644 index 00000000000..71ea884a1ad --- /dev/null +++ b/AFAAS/interfaces/adapters/chatmodel/chatmodel.py @@ -0,0 +1,249 @@ +from __future__ import annotations +import abc +import functools +import time +from typing import ( + Any, + Callable, + Dict, + Generic, + Optional, + TypeVar, + ParamSpec, +) +from pydantic import BaseModel, Field + +from langchain_core.language_models.chat_models import BaseChatModel + +from AFAAS.interfaces.adapters.language_model import ( + AbstractLanguageModelProvider, + BaseModelInfo, + BaseModelResponse, + ModelProviderService, + AbstractPromptConfiguration, +) +from AFAAS.lib.utils.json_schema import JSONSchema + + +from openai import APIError, RateLimitError +from openai.resources import AsyncCompletions +from AFAAS.lib.sdk.logger import AFAASLogger +LOG = AFAASLogger(name=__name__) + +from langchain_core.messages import ChatMessage +from AFAAS.interfaces.adapters.chatmodel.chatmessage import AssistantChatMessage + + +class CompletionModelFunction(BaseModel): + name: str + description: str + parameters: Dict[str, JSONSchema] + + def schema(self , schema_builder = Callable) -> dict[str, str | dict | list]: + return schema_builder(self) + + def _remove_none_entries(self, data: Dict[str, Any]) -> Dict[str, Any]: + cleaned_data = {} + 
for key, value in data.items(): + if value is not None: + if isinstance(value, dict): + cleaned_data[key] = self._remove_none_entries(value) + else: + cleaned_data[key] = value + return cleaned_data + + def model_dump(self, *args, **kwargs): + # Call the parent class's model_dump() method to get the original dictionary + data = super().model_dump(*args, **kwargs) + + # Remove entries with None values recursively + cleaned_data = self._remove_none_entries(data) + + return cleaned_data + + def fmt_line(self) -> str: + params = ", ".join( + f"{name}: {p.type.value}" for name, p in self.parameters.items() + ) + return f"{self.name}: {self.description}. Params: ({params})" + + +class ChatPrompt(BaseModel): + # TODO : Remove or rewrite with 2 arguments + # messages : list[Union[ChatMessage, AbstractChatMessage]] + # chat_completion_kwargs : ChatCompletionKwargs + messages: list + tools: list[CompletionModelFunction] = Field(default_factory=list) + tool_choice: str + default_tool_choice: str + + def raw(self) -> list: + return [m.dict() for m in self.messages] + + def __str__(self): + return "\n\n".join( + [m.dict() for m in self.messages] + ) + + +_T = TypeVar("_T") + + +class AbstractChatModelResponse(BaseModelResponse, Generic[_T]): + + response: Optional[AssistantChatMessage] = None + parsed_result: _T = None + """Standard response struct for a response from a language model.""" + + content: dict = None + chat_messages: list[ChatMessage] = [] + system_prompt: str = None + + +class ChatModelInfo(BaseModelInfo): + llm_service : ModelProviderService = ModelProviderService.CHAT + max_tokens: int + has_function_call_api: bool = False + + +class AbstractChatModelProvider(AbstractLanguageModelProvider): + + #llm_model : Optional[BaseChatModel] = None + + @abc.abstractmethod + def count_message_tokens( + self, + messages: ChatMessage | list[ChatMessage], + model_name: str, + ) -> int: ... + + @abc.abstractmethod + async def chat( + self, messages: list[ChatMessage], *_, **llm_kwargs + ) -> AsyncCompletions: + ... + + @abc.abstractmethod + def make_model_arg(self, model_name : str) -> dict: + ... + + @abc.abstractmethod + def make_tool(self, f : CompletionModelFunction) -> dict: + ... + + @staticmethod + @abc.abstractmethod + def tool_builder(func: CompletionModelFunction) -> dict[str, str | dict | list]: + ... + @abc.abstractmethod + def make_tools_arg(self, tools : list[CompletionModelFunction]) -> dict: + ... + + @abc.abstractmethod + def make_tool_choice_arg(self , name : str) -> dict: + ... + + @abc.abstractmethod + def has_oa_tool_calls_api(self, model_name: str) -> bool: + ... + + @abc.abstractmethod + def get_default_config(self) -> AbstractPromptConfiguration: + ... + + @abc.abstractmethod + def extract_response_details( + self, response: AsyncCompletions, model_name: str + ) -> BaseModel: + ... + + @abc.abstractmethod + def should_retry_function_call( + self, tools: list[CompletionModelFunction], response_message: dict + ) -> bool: + ... + + @abc.abstractmethod + def formulate_final_response( + self, + response_message: dict, + completion_parser: Callable[[AssistantChatMessage], _T], + ) -> AbstractChatModelResponse[_T]: + ... + + def __getattribute__(self, __name: str): + + if not __name.startswith("__llmmodel_"): + return super().__getattribute__(__name) + + try: + return super().__getattribute__(__name) + except AttributeError: + return self.__llmmodel_default__() + + @abc.abstractmethod + def __llmmodel_default__(self) -> str: + ... 
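
# NOTE: The __getattribute__ override above gives the family of
# ``__llmmodel_*__`` accessors a safe fallback: looking up a variant that a
# concrete provider does not define (e.g. ``__llmmodel_cheap__``) returns the
# *value* of ``__llmmodel_default__()`` instead of raising AttributeError.
# A minimal sketch of the behaviour, assuming a concrete subclass that only
# implements __llmmodel_default__ to return "gpt-3.5-turbo":
#
#     provider.__llmmodel_default__()  # -> "gpt-3.5-turbo" (defined method)
#     provider.__llmmodel_cheap__      # undefined -> "gpt-3.5-turbo" (already
#                                      # the default string, not a method)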


_P = ParamSpec("_P")

class _RetryHandler:
    """Retry Handler for OpenAI API call.

    Args:
        num_retries int: Number of retries. Defaults to 10.
        backoff_base float: Base for exponential backoff. Defaults to 2.
        warn_user bool: Whether to warn the user. Defaults to True.
    """

    _retry_limit_msg = "Error: Reached rate limit, passing..."
    _api_key_error_msg = (
        "Please double check that you have setup a PAID OpenAI API Account. You can "
        "read more here: https://docs.agpt.co/setup/#getting-an-api-key"
    )
    _backoff_msg = "Error: API Bad gateway. Waiting {backoff} seconds..."

    def __init__(
        self,
        num_retries: int = 10,
        backoff_base: float = 2.0,
        warn_user: bool = True,
    ):
        self._num_retries = num_retries
        self._backoff_base = backoff_base
        self._warn_user = warn_user

    def _log_rate_limit_error(self) -> None:
        LOG.trace(self._retry_limit_msg)
        if self._warn_user:
            LOG.warning(self._api_key_error_msg)
            self._warn_user = False

    def _backoff(self, attempt: int) -> None:
        backoff = self._backoff_base ** (attempt + 2)
        LOG.trace(self._backoff_msg.format(backoff=backoff))
        time.sleep(backoff)

    def __call__(self, func: Callable[_P, _T]) -> Callable[_P, _T]:
        @functools.wraps(func)
        async def _wrapped(*args: _P.args, **kwargs: _P.kwargs) -> _T:
            num_attempts = self._num_retries + 1  # +1 for the first attempt
            for attempt in range(1, num_attempts + 1):
                try:
                    return await func(*args, **kwargs)

                except RateLimitError:
                    if attempt == num_attempts:
                        raise
                    self._log_rate_limit_error()

                except APIError as e:
                    if (e.code != 502) or (attempt == num_attempts):
                        raise
                except Exception as e:
                    LOG.warning(e)
                self._backoff(attempt)

        return _wrapped
diff --git a/AFAAS/interfaces/adapters/chatmodel/wrapper.py b/AFAAS/interfaces/adapters/chatmodel/wrapper.py
new file mode 100644
index 00000000000..bff24b70253
--- /dev/null
+++ b/AFAAS/interfaces/adapters/chatmodel/wrapper.py
@@ -0,0 +1,223 @@
from pydantic import BaseModel
from AFAAS.interfaces.adapters.chatmodel.chatmodel import LOG, _RetryHandler, AbstractChatModelProvider, AbstractChatModelResponse, CompletionModelFunction
from AFAAS.interfaces.adapters.language_model import AbstractPromptConfiguration

from typing import (
    Callable,
    TypeVar,
    Optional
)
from AFAAS.interfaces.adapters.chatmodel.chatmessage import AssistantChatMessage

from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import ChatMessage
from openai.resources import AsyncCompletions


_T = TypeVar("_T")


class ChatCompletionKwargs(BaseModel):
    llm_model_name: str
    """The name of the language model"""
    tools: Optional[list[CompletionModelFunction]] = None
    """List of available tools"""
    tool_choice: Optional[str] = None
    """Force the use of one tool"""
    default_tool_choice: Optional[str] = None
    """This tool will be called after 3 failed attempts (cf. the retry logic in ChatModelWrapper)"""

class ChatModelWrapper:

    llm_adapter: AbstractChatModelProvider

    def __init__(self, llm_model: BaseChatModel) -> None:

        self.llm_adapter = llm_model

        self.retry_per_request = llm_model._settings.configuration.retries_per_request
        self.maximum_retry = llm_model._settings.configuration.maximum_retry
        self.maximum_retry_before_default_function = llm_model._settings.configuration.maximum_retry_before_default_function

        retry_handler = _RetryHandler(
            num_retries=self.retry_per_request,
        )
        self._create_chat_completion = retry_handler(self._chat)
        self._func_call_fails_count = 0

    async def create_chat_completion(
        self,
        chat_messages: list[ChatMessage],
        completion_kwargs: ChatCompletionKwargs,
        completion_parser: Callable[[AssistantChatMessage], _T],
        # Function to parse the response, usually injected by an AbstractPromptStrategy
        **kwargs,
    ) -> AbstractChatModelResponse[_T]:
        if isinstance(chat_messages, ChatMessage):
            chat_messages = [chat_messages]
        elif not isinstance(chat_messages, list):
            raise TypeError(
                f"Expected ChatMessage or list[ChatMessage], but got {type(chat_messages)}"
            )

        # ##############################################################################
        # ### Prepare arguments for API call using CompletionKwargs
        # ##############################################################################
        llm_kwargs = self._make_chat_kwargs(completion_kwargs=completion_kwargs, **kwargs)

        # ##############################################################################
        # ### Step 2: Execute main chat completion and extract details
        # ##############################################################################

        response = await self._create_chat_completion(
            messages=chat_messages,
            llm_kwargs=llm_kwargs,
            **kwargs
        )
        response_message = self.llm_adapter.extract_response_details(
            response=response,
            model_name=completion_kwargs.llm_model_name
        )

        # ##############################################################################
        # ### Step 3: Handle missing function call and retry if necessary
        # ##############################################################################
        # FIXME : Remove before commit
        if self.llm_adapter.should_retry_function_call(
            tools=completion_kwargs.tools, response_message=response_message
        ):
            LOG.error(
                f"Attempt number {self._func_call_fails_count + 1} : Function Call was expected"
            )
            if (
                self._func_call_fails_count
                <= self.maximum_retry
            ):
                return await self._retry_chat_completion(
                    model_prompt=chat_messages,
                    completion_kwargs=completion_kwargs,
                    completion_parser=completion_parser,
                    response=response_message,
                )

            # FIXME, TODO, NOTE: Organize an application-side feedback loop to improve the prompts, as it is not normal that functions are not called
            try:
                response_message.additional_kwargs['tool_calls'] = None
            except Exception as e:
                response_message['tool_calls'] = None
                LOG.warning(f"Following Exception occurred : {e}")

            # self._handle_failed_retry(response_message)

        # ##############################################################################
        # ### Step 4: Reset failure count and integrate improvements
        # ##############################################################################
        self._func_call_fails_count = 0

        # ##############################################################################
        # ### Step 5: Self feedback
        # ##############################################################################

        # Create an option to deactivate feedbacks
        # Option : Maximum number of feedbacks allowed

        # Prerequisite : Read OpenAI API (Chat Model) tool_choice section

        # User : 1 shirt takes 5 minutes to dry; how long do 10 shirts take to dry?
        # Assistant : It takes 50 minutes

        # System : "The user question was ....
        # The Assistant Response was ..."
        # Is it ok ?

        # If not, provide a feedback

        # => T-shirts can be dried at the same time

        # ##############################################################################
        # ### Step 6: Formulate the response
        # ##############################################################################
        return self.llm_adapter.formulate_final_response(
            response_message=response_message,
            completion_parser=completion_parser,
        )

    async def _retry_chat_completion(
        self,
        model_prompt: list[ChatMessage],
        completion_kwargs: ChatCompletionKwargs,
        completion_parser: Callable[[AssistantChatMessage], _T],
        response: AsyncCompletions,
        **kwargs
    ) -> AbstractChatModelResponse[_T]:
        self._func_call_fails_count += 1

        self.llm_adapter._budget.update_usage_and_cost(model_response=response.base_response)
        return await self.create_chat_completion(
            chat_messages=model_prompt,
            completion_parser=completion_parser,
            completion_kwargs=completion_kwargs,
            **kwargs
        )

    def _make_chat_kwargs(self, completion_kwargs: ChatCompletionKwargs, **kwargs) -> dict:

        built_kwargs = {}
        built_kwargs.update(self.llm_adapter.make_model_arg(model_name=completion_kwargs.llm_model_name))

        if completion_kwargs.tools is None or len(completion_kwargs.tools) == 0:
            # if there is no tool, there is nothing more to prepare
            return built_kwargs

        else:
            built_kwargs.update(self.llm_adapter.make_tools_arg(tools=completion_kwargs.tools))

            if len(completion_kwargs.tools) == 1:
                built_kwargs.update(self.llm_adapter.make_tool_choice_arg(name=completion_kwargs.tools[0].name))
                # built_kwargs.update(self.llm_adapter.make_tool_choice_arg(name= completion_kwargs.tools[0]["function"]["name"]))
            elif completion_kwargs.tool_choice != "auto":
                if (
                    self._func_call_fails_count
                    >= self.maximum_retry_before_default_function
                ):
                    built_kwargs.update(self.llm_adapter.make_tool_choice_arg(name=completion_kwargs.default_tool_choice))
                else:
                    built_kwargs.update(self.llm_adapter.make_tool_choice_arg(name=completion_kwargs.tool_choice))
        return built_kwargs

    def count_message_tokens(
        self,
        messages: ChatMessage | list[ChatMessage],
        model_name: str,
    ) -> int:
        return self.llm_adapter.count_message_tokens(messages, model_name)

    async def _chat(
        self,
        messages: list[ChatMessage],
        llm_kwargs: dict,
        *_,
        **kwargs
    ) -> AsyncCompletions:

        # llm_kwargs = self._make_chat_kwargs(**kwargs)
        LOG.trace(messages[0].content)
        LOG.trace(llm_kwargs)
        return_value = await self.llm_adapter.chat(
            messages=messages, **llm_kwargs
        )

        return return_value

    def has_oa_tool_calls_api(self, model_name: str) -> bool:
        return self.llm_adapter.has_oa_tool_calls_api(model_name)

    def get_default_config(self) -> AbstractPromptConfiguration:
        return self.llm_adapter.get_default_config()

diff --git a/AFAAS/interfaces/adapters/configuration.py b/AFAAS/interfaces/adapters/configuration.py
new file mode 100644
index 00000000000..f56d679b934
--- /dev/null
+++ b/AFAAS/interfaces/adapters/configuration.py
@@ -0,0 +1,57 @@
import abc

from pydantic import BaseModel, SecretBytes, SecretStr

from AFAAS.configs.config import SystemSettings, UserConfigurable
from AFAAS.configs.schema import SystemConfiguration, update_model_config
from pydantic import ConfigDict
from pydantic.fields import Field


class BaseProviderUsage(SystemConfiguration, abc.ABC):
    @abc.abstractmethod
    def update_usage(self, *args, **kwargs) -> None:
        """Update the usage of the resource."""
        ...
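
# A concrete subclass only needs to accumulate whatever counters make sense
# for the resource; ``BaseModelProviderUsage`` in ``language_model.py`` is the
# in-repo example. A minimal hypothetical variant could look like:
#
#     class RequestCountUsage(BaseProviderUsage):
#         requests: int = 0
#
#         def update_usage(self, *args, **kwargs) -> None:
#             self.requests += 1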


class BaseProviderBudget(SystemConfiguration):
    total_budget: float = Field()
    total_cost: float
    remaining_budget: float
    usage: BaseProviderUsage

    @abc.abstractmethod
    def update_usage_and_cost(self, *args, **kwargs) -> None:
        """Update the usage and cost of the resource."""
        ...


class BaseProviderCredentials(SystemConfiguration):
    """Struct for credentials."""

    def unmasked(self) -> dict:
        return self.unmask(self)

    model_config = update_model_config(original=SystemConfiguration.model_config,
        new={
            'json_encoders': {
                SecretStr: lambda v: v.get_secret_value() if v else None,
                SecretBytes: lambda v: v.get_secret_value() if v else None,
            }}
    )

    @staticmethod
    def unmask(model: BaseModel) -> dict:
        unmasked_fields = {}
        for field_name, _ in model.model_fields.items():
            value = getattr(model, field_name)
            if isinstance(value, SecretStr):
                unmasked_fields[field_name] = value.get_secret_value()
            else:
                unmasked_fields[field_name] = value
        return unmasked_fields


class BaseProviderSettings(SystemSettings):
    credentials: BaseProviderCredentials | None = None
    budget: BaseProviderBudget | None = None
diff --git a/AFAAS/interfaces/adapters/language_model.py b/AFAAS/interfaces/adapters/language_model.py
new file mode 100644
index 00000000000..3060fbed8f7
--- /dev/null
+++ b/AFAAS/interfaces/adapters/language_model.py
@@ -0,0 +1,149 @@
from __future__ import annotations

import abc
import enum
from typing import Callable, ClassVar, Protocol, Optional, Any

from pydantic import ConfigDict, BaseModel
from AFAAS.lib.sdk.logger import AFAASLogger

from AFAAS.configs.schema import SystemConfiguration, Field
from AFAAS.interfaces.adapters.configuration import (
    BaseProviderBudget,
    BaseProviderCredentials,
    BaseProviderSettings,
    BaseProviderUsage,
)

LOG = AFAASLogger(name=__name__)

class ModelProviderService(str, enum.Enum):
    """A ModelService describes what kind of service the model provides."""

    EMBEDDING = "embedding"
    CHAT = "language"
    TEXT = "text"

class ModelProviderName(str, enum.Enum):
    OPENAI = "openai"

class BaseModelInfo(BaseModel):
    name: str
    service: ModelProviderService
    provider_name: ModelProviderName
    prompt_token_cost: float = 0.0
    completion_token_cost: float = 0.0


class BaseModelResponse(BaseModel):
    prompt_tokens: int
    completion_tokens: int
    llm_model_info: BaseModelInfo
    strategy: Optional[Any] = None  # TODO: Should save the strategy used to get the response

    def __init__(self, **data: Any):
        super().__init__(**data)
        LOG.debug("BaseModelResponse does not save the strategy")


class BaseModelProviderConfiguration(SystemConfiguration):
    extra_request_headers: dict[str, str] = Field(default_factory=dict)
    retries_per_request: int = Field(default=10)
    maximum_retry: int = 1
    maximum_retry_before_default_function: int = 1


class BaseModelProviderCredentials(BaseProviderCredentials):
    api_key: str | None = Field(default=None)
    api_type: str | None = Field(default=None)
    api_base: str | None = Field(default=None)
    api_version: str | None = Field(default=None)
    deployment_id: str | None = Field(default=None)
    model_config = ConfigDict(extra="ignore")


class BaseModelProviderUsage(BaseProviderUsage):
    completion_tokens: int = 0
    prompt_tokens: int = 0
    total_tokens: int = 0

    def update_usage(
        self,
        model_response: BaseModelResponse,
    ) -> None:
        self.completion_tokens += model_response.completion_tokens
        self.prompt_tokens += model_response.prompt_tokens
        self.total_tokens += (
            model_response.completion_tokens + model_response.prompt_tokens
        )


class BaseModelProviderBudget(BaseProviderBudget):
    total_budget: float = Field()
    total_cost: float
    remaining_budget: float
    usage: BaseModelProviderUsage

    def update_usage_and_cost(
        self,
        model_response: BaseModelResponse,
    ) -> None:
        """Update the usage and cost of the provider."""
        llm_model_info = model_response.llm_model_info
        self.usage.update_usage(model_response)
        incurred_cost = (
            model_response.completion_tokens * llm_model_info.completion_token_cost
            + model_response.prompt_tokens * llm_model_info.prompt_token_cost
        )
        self.total_cost += incurred_cost
        if abs(self.remaining_budget) != float("inf"):
            self.remaining_budget -= incurred_cost


class BaseModelProviderSettings(BaseProviderSettings):
    configuration: BaseModelProviderConfiguration
    credentials: BaseModelProviderCredentials
    budget: BaseModelProviderBudget


class AbstractModelProvider(abc.ABC):
    default_settings: ClassVar[BaseModelProviderSettings]

    _configuration: BaseModelProviderConfiguration

    @abc.abstractmethod
    def count_tokens(self, text: str, model_name: str) -> int: ...

    @abc.abstractmethod
    def get_tokenizer(self, model_name: str) -> "ModelTokenizer": ...

    @abc.abstractmethod
    def get_token_limit(self, model_name: str) -> int: ...

    @abc.abstractmethod
    def get_remaining_budget(self) -> float: ...


class AbstractLanguageModelProvider(AbstractModelProvider):
    @abc.abstractmethod
    def has_oa_tool_calls_api(self, model_name: str) -> bool: ...

    @abc.abstractmethod
    def get_default_config(self) -> AbstractPromptConfiguration: ...

class ModelTokenizer(Protocol):
    @abc.abstractmethod
    def encode(self, text: str) -> list: ...

    @abc.abstractmethod
    def decode(self, tokens: list) -> str: ...
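
# Any object exposing ``encode``/``decode`` satisfies this protocol; for the
# OpenAI adapter later in this patch series, tiktoken encodings are one
# concrete example (a sketch, assuming tiktoken is installed):
#
#     import tiktoken
#
#     tokenizer: ModelTokenizer = tiktoken.encoding_for_model("gpt-3.5-turbo")
#     tokens = tokenizer.encode("hello world")
#     assert tokenizer.decode(tokens) == "hello world"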

class AbstractPromptConfiguration(abc.ABC, SystemConfiguration):
    """Struct for model configuration."""

    llm_model_name: str = Field()
    temperature: float = Field()

diff --git a/AFAAS/interfaces/prompts/strategy.py b/AFAAS/interfaces/prompts/strategy.py
new file mode 100644
index 00000000000..af0677dcab4
--- /dev/null
+++ b/AFAAS/interfaces/prompts/strategy.py
@@ -0,0 +1,336 @@
from __future__ import annotations

import abc
import os
import re
import sys
from typing import TYPE_CHECKING

from jinja2 import Environment, FileSystemLoader, select_autoescape
from langchain_core.messages import ChatMessage, AIMessage


from AFAAS.configs.schema import SystemConfiguration

if TYPE_CHECKING:
    from AFAAS.interfaces.task.task import AbstractTask

from AFAAS.interfaces.agent.features.agentmixin import AgentMixin
from AFAAS.interfaces.adapters.language_model import AbstractLanguageModelProvider, AbstractPromptConfiguration
from AFAAS.interfaces.adapters.chatmodel import (
    ChatPrompt,
    AbstractChatModelProvider,
    AbstractChatModelResponse,
    AssistantChatMessage,
    CompletionModelFunction,
)
from AFAAS.interfaces.prompts.utils.utils import (
    indent,
    json_loads,
    to_dotted_list,
    to_md_quotation,
    to_numbered_list,
    to_string_list,
)
from AFAAS.lib.utils.json_schema import JSONSchema
from AFAAS.lib.sdk.logger import AFAASLogger

LOG = AFAASLogger(name=__name__)
RESPONSE_SCHEMA = JSONSchema(
    type=JSONSchema.Type.OBJECT,
    properties={
        "thoughts": JSONSchema(
            type=JSONSchema.Type.OBJECT,
            required=True,
            properties={
                "limits": JSONSchema(
                    description="Express your limitations (Context Limitation, Token Limitation, Cognitive Limitation) if you were an autonomous program hosted on a server and relying on a Large Language Model to take decisions",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "overcome_limit": JSONSchema(
                    description="How you would overcome this limit (if any)",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "reasoning": JSONSchema(
                    description="Your reasoning process",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "criticism": JSONSchema(
                    description="Constructive self-criticism of your reasoning process",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "plan": JSONSchema(
                    description="Short markdown-style bullet list that conveys your plan",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
                "self_feedback": JSONSchema(
                    description="If you were to do it again, what would you tell yourself?",
                    type=JSONSchema.Type.STRING,
                    required=True,
                ),
            },
        )
    },
)


class DefaultParsedResponse(dict):
    id: str
    type: str
    command_name: str
    command_args: dict
    assistant_reply_dict: dict


class PromptStrategiesConfiguration(SystemConfiguration):
    temperature: float
    # top_p: Optional[float] = None
    # max_tokens: Optional[int] = None
    # frequency_penalty: Optional[float] = None  # Avoids repeating oneself; ~0.3 if coding
    # presence_penalty: Optional[float] = None  # Avoids certain subjects


class AbstractPromptStrategy(AgentMixin, abc.ABC):
    STRATEGY_NAME: str
    default_configuration: PromptStrategiesConfiguration

    @abc.abstractmethod
    async def build_message(self, *_, **kwargs) -> ChatPrompt: ...

    @abc.abstractmethod
    def parse_response_content(self, response_content: AssistantChatMessage): ...

    @abc.abstractmethod
    def set_tools(self, **kwargs): ...
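
    # Lifecycle, as driven by BasePromptManager.execute_strategy() in
    # prompt_manager.py: set_agent() binds the strategy to an agent, then
    # set_tools(**kwargs) prepares the functions exposed to the model,
    # build_message(**kwargs) produces the ChatPrompt sent through
    # ChatModelWrapper, and parse_response_content() is handed to the wrapper
    # as the completion parser for the raw model output.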

    @abc.abstractmethod
    def get_llm_provider(self) -> AbstractChatModelProvider:
        """Get the provider: Gemini, OpenAI, Llama, ..."""
        return self._agent.prompt_manager.config.default

    @abc.abstractmethod
    def get_prompt_config(self) -> AbstractPromptConfiguration:
        """Get the prompt configuration: model version (e.g. gpt-3.5, gpt-4...), temperature, top_k, top_p, ..."""
        return self.get_llm_provider().get_default_config()

    # TODO : This implementation is crude and needs a rework
    def get_tools(self) -> list[CompletionModelFunction]:
        """
        Returns the list of tools exposed by this strategy.

        Returns:
            list: A list of CompletionModelFunction objects detailing each function's purpose and parameters.

        Example:
            >>> strategy = RefineUserContextStrategy(...)
            >>> functions = strategy.get_tools()
            >>> print(functions[0].name)
            'refine_requirements'
        """
        return self._tools

    def get_tools_names(self) -> list[str]:
        """
        Returns the names of the tools exposed by this strategy.

        Returns:
            list: A list of strings, each representing the name of a function.

        Example:
            >>> strategy = RefineUserContextStrategy(...)
            >>> function_names = strategy.get_tools_names()
            >>> print(function_names)
            ['refine_requirements']
        """
        return [item.name for item in self._tools]

    # This can be expanded to support multiple types of (inter)actions within an agent
    @abc.abstractmethod
    def response_format_instruction(
        self,
    ) -> str:
        language_model_provider = self.get_llm_provider()
        model_name = self.get_prompt_config().llm_model_name
        use_oa_tools_api = language_model_provider.has_oa_tool_calls_api(
            model_name=model_name
        )

        response_schema = RESPONSE_SCHEMA.copy(deep=True)
        if (
            use_oa_tools_api
            and response_schema.properties
            and "command" in response_schema.properties
        ):
            del response_schema.properties["command"]

        # Unindent for performance
        response_format: str = re.sub(
            r"\n\s+",
            "\n",
            response_schema.to_typescript_object_interface("Response"),
        )

        if use_oa_tools_api:
            return (
                f"Respond strictly with a JSON of type `Response` :\n"
                f"{response_format}"
            )

        return (
            "Respond strictly with JSON. "
            "The JSON should be compatible with the TypeScript type `Response` from the following:\n"
            f"{response_format}"
        )

    ###
    ### parse_response_content
    ###
    def default_parse_response_content(
        self,
        response_content: AssistantChatMessage,
    ) -> list[DefaultParsedResponse]:
        """Parse the actual text response from the objective model.

        Args:
            response_content: The raw response content from the objective model.

        Returns:
            The parsed response.
+ + """ + assistant_return_list: list[DefaultParsedResponse] = [] + + assistant_reply_dict = response_content.content + + if (isinstance(response_content, AIMessage)): + tool_calls = response_content.additional_kwargs['tool_calls'] + else: + tool_calls = response_content["tool_calls"] + + if tool_calls : + for tool in tool_calls: + try: + command_args = json_loads(tool["function"]["arguments"]) + except Exception: + LOG.warning(command_args) + + ### + ### NEW + ### + command_name = tool["function"]["name"] + + assistant_return_list.append( + DefaultParsedResponse( + id=tool["id"], + type=tool["type"], + command_name=command_name, + command_args=command_args, + assistant_reply_dict=assistant_reply_dict, + ) + ) + + return assistant_return_list + + @staticmethod + def get_autocorrection_response(response: AbstractChatModelResponse): + return response.parsed_result[0]["command_args"]["note_to_agent"] + + async def _build_jinja_message( + self, task: AbstractTask, template_name: str, template_params: dict + ) -> str: + """Build a message using jinja2 template engine""" + + # Get the module of the calling (child) class + class_module = sys.modules[self.__class__.__module__] + child_directory = os.path.dirname(os.path.abspath(class_module.__file__)) + # Check if template exists in child class directory, else use parent directory + if os.path.exists(os.path.join(child_directory, template_name)): + directory_to_use = child_directory + else: + directory_to_use = os.path.dirname(os.path.abspath(__file__)) + + file_loader = FileSystemLoader(directory_to_use) + env = Environment( + loader=file_loader, + autoescape=select_autoescape(["html", "xml"]), + extensions=["jinja2.ext.loopcontrols"], + ) + template = env.get_template(template_name) + + template_params.update( + { + "to_md_quotation": to_md_quotation, + "to_dotted_list": to_dotted_list, + "to_numbered_list": to_numbered_list, + "to_string_list": to_string_list, + "indent": indent, + "task": task, + "strategy": self, + } + ) + if hasattr(task, "task_parent"): + template_params.update({"task_parent": await task.task_parent()}) + + return template.render(template_params) + + + + + + def build_chat_prompt(self, messages: list[ChatMessage] , tool_choice : str = "auto") -> ChatPrompt: + ChatMessage + + # messagev2 = [] + # if isinstance(messages, list): + # for message in messages: + # messagev2.append(convert_v1_instance_to_v2_dynamic(message)) + # else : + # messagev2.append(convert_v1_instance_to_v2_dynamic(messages)) + + # print("""////////////////////////////////////\n\n"""*3) + # print(messages[0]) + # print(messagev2[0]) + # exit() + strategy_tools = self.get_tools() + prompt = ChatPrompt( + messages = messages , + tools= strategy_tools , + tool_choice = tool_choice , + default_tool_choice =self.default_tool_choice , + tokens_used = 0 , + ) + + return prompt + + + + +# from pydantic import BaseModel +# def convert_v1_instance_to_v2_dynamic(obj_v1: BaseModel) -> BaseModel: + +# from pydantic import create_model +# from typing import Type +# """ +# Converts an instance of a Pydantic v1 model to a dynamically created Pydantic v2 model instance. + +# Parameters: +# - obj_v1: The instance of the Pydantic version 1 model. + +# Returns: +# - An instance of a dynamically created Pydantic version 2 model that mirrors the structure of obj_v1. +# """ +# # Extract field definitions from the v1 instance +# fields = {name: (field.outer_type_, ...) 
for name, field in obj_v1.__fields__.items()} + +# # Dynamically create a new Pydantic model class +# DynamicModelV2 = create_model('DynamicModelV2', **fields) + +# # Convert the v1 instance to a dictionary and use it to create an instance of the new model +# obj_v2 = DynamicModelV2.parse_obj(obj_v1.dict()) + +# return obj_v2 From 2a20a85d18012ebf81b215f4dc25ce4410853cbd Mon Sep 17 00:00:00 2001 From: ph-ausseil Date: Fri, 23 Feb 2024 17:15:57 +0100 Subject: [PATCH 2/7] OpenAI Adapters --- AFAAS/core/adapters/openai/__init__.py | 1 + AFAAS/core/adapters/openai/chatmodel.py | 227 ++++++++++++++++++++ AFAAS/core/adapters/openai/configuration.py | 158 ++++++++++++++ 3 files changed, 386 insertions(+) create mode 100644 AFAAS/core/adapters/openai/__init__.py create mode 100644 AFAAS/core/adapters/openai/chatmodel.py create mode 100644 AFAAS/core/adapters/openai/configuration.py diff --git a/AFAAS/core/adapters/openai/__init__.py b/AFAAS/core/adapters/openai/__init__.py new file mode 100644 index 00000000000..9d48db4f9f8 --- /dev/null +++ b/AFAAS/core/adapters/openai/__init__.py @@ -0,0 +1 @@ +from __future__ import annotations diff --git a/AFAAS/core/adapters/openai/chatmodel.py b/AFAAS/core/adapters/openai/chatmodel.py new file mode 100644 index 00000000000..fd56667f589 --- /dev/null +++ b/AFAAS/core/adapters/openai/chatmodel.py @@ -0,0 +1,227 @@ +import os +from typing import Any, Callable, Dict, ParamSpec, Tuple, TypeVar + +import tiktoken +from openai.resources import AsyncCompletions + +from AFAAS.core.adapters.openai.configuration import ( + OPEN_AI_CHAT_MODELS, + OPEN_AI_DEFAULT_CHAT_CONFIGS, + OPEN_AI_MODELS, + OpenAIModelName, + OpenAIPromptConfiguration, + OpenAISettings, +) + +from langchain_core.messages import AIMessage , ChatMessage +from AFAAS.configs.schema import Configurable +from AFAAS.interfaces.adapters.chatmodel.chatmodel import ( + AbstractChatModelProvider, + AbstractChatModelResponse, + AssistantChatMessage, + CompletionModelFunction, +) +from AFAAS.interfaces.adapters.language_model import ModelTokenizer, BaseModelResponse +from AFAAS.lib.sdk.logger import AFAASLogger + +LOG = AFAASLogger(name=__name__) + +_T = TypeVar("_T") +_P = ParamSpec("_P") + + +class AFAASChatOpenAI(Configurable[OpenAISettings], AbstractChatModelProvider): + + def __llmmodel_default__(self) : + return "gpt-3.5-turbo" + + def __llmmodel_cheap__(self) : + return "gpt-3.5-turbo" + + def __llmmodel_code_expert_model__(self) : + return "gpt-3.5-turbo" + + def __llmmodel_long_context_model__(self) : + return "gpt-3.5-turbo" + + def __init__( + self, + settings: OpenAISettings = OpenAISettings(), + ): + super().__init__(settings) + self._credentials = settings.credentials + self._budget = settings.budget + + def get_token_limit(self, model_name: str) -> int: + return OPEN_AI_MODELS[model_name].max_tokens + + def get_remaining_budget(self) -> float: + return self._budget.remaining_budget + + @classmethod + def get_tokenizer(cls, model_name: OpenAIModelName) -> ModelTokenizer: + return tiktoken.encoding_for_model(model_name) + + @classmethod + def count_tokens(cls, text: str, model_name: OpenAIModelName) -> int: + encoding = cls.get_tokenizer(model_name) + return len(encoding.encode(text)) + + @classmethod + def count_message_tokens( + cls, + messages: ChatMessage | list[ChatMessage], + model_name: OpenAIModelName, + ) -> int: + + if model_name.startswith("gpt-3.5-turbo"): + tokens_per_message = ( + 4 # every message follows <|start|>{role/name}\n{content}<|end|>\n + ) + tokens_per_name = -1 # if 
there's a name, the role is omitted
            encoding_model = "gpt-3.5-turbo"
        elif model_name.startswith("gpt-4"):
            tokens_per_message = 3
            tokens_per_name = 1
            encoding_model = "gpt-4"
        else:
            raise NotImplementedError(
                f"count_message_tokens() is not implemented for model {model_name}.\n"
                " See https://github.com/openai/openai-python/blob/main/chatml.md for"
                " information on how messages are converted to tokens."
            )
        try:
            encoding = tiktoken.encoding_for_model(encoding_model)
        except KeyError:
            LOG.warning(
                f"Model {model_name} not found. Defaulting to cl100k_base encoding."
            )
            encoding = tiktoken.get_encoding("cl100k_base")

        num_tokens = 0
        for message in messages:
            num_tokens += tokens_per_message
            for key, value in message.dict().items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        num_tokens += 3  # every reply is primed with <|start|>assistant<|message|>
        return num_tokens

    def extract_response_details(
        self, response: AsyncCompletions, model_name: str
    ) -> Any:
        # Returns the raw response with a BaseModelResponse attached as `base_response`.
        if isinstance(response, AsyncCompletions):
            response_args = BaseModelResponse(
                llm_model_info=OPEN_AI_CHAT_MODELS[model_name],
                prompt_tokens=response.usage.prompt_tokens,
                completion_tokens=response.usage.completion_tokens,
            )
            # response_message = response.choices[0].message.model_dump()
        elif isinstance(response, AIMessage):
            # AGPT retro compatibility
            response_args = BaseModelResponse(
                llm_model_info=OPEN_AI_CHAT_MODELS[model_name],
                prompt_tokens=self.callback.prompt_tokens,
                completion_tokens=self.callback.completion_tokens,
            )
        response.base_response = response_args
        return response

    def should_retry_function_call(
        self, tools: list[CompletionModelFunction], response_message: Dict[str, Any]
    ) -> bool:
        if not tools:
            return False

        if not isinstance(response_message, AIMessage):
            # AGPT retro compatibility
            if "tool_calls" not in response_message:
                return True
        else:
            if "tool_calls" not in response_message.additional_kwargs:
                return True

        return False

    def formulate_final_response(
        self,
        response_message: Dict[str, Any],
        completion_parser: Callable[[AssistantChatMessage], _T],
        **kwargs
    ) -> AbstractChatModelResponse[_T]:

        response_info = response_message.base_response.model_dump()

        response_message_dict = response_message.dict()
        parsed_result = completion_parser(response_message)

        response = AbstractChatModelResponse(
            response=response_message_dict,
            parsed_result=parsed_result,
            **response_info,
        )
        self._budget.update_usage_and_cost(model_response=response)
        return response

    def __repr__(self):
        return "OpenAIProvider()"

    def has_oa_tool_calls_api(self, model_name: str) -> bool:
        return True  # Always True for OpenAI

    def get_default_config(self) -> OpenAIPromptConfiguration:
        LOG.warning(f"Using {__class__.__name__} default config, we recommend setting individual model configs")
        return OPEN_AI_DEFAULT_CHAT_CONFIGS.SMART_MODEL_32K

    def make_tools_arg(self, tools: list[CompletionModelFunction]) -> dict:
        return {"tools": [self.make_tool(f) for f in tools]}

    def make_tool(self, f: CompletionModelFunction) -> dict:
        return {"type": "function", "function": f.schema(schema_builder=self.tool_builder)}

    @staticmethod
    def tool_builder(func: CompletionModelFunction) -> dict[str, str | dict | list]:
        return {
            "name": func.name,
            "description": func.description,
            "parameters": {
                "type": "object",
                "properties": {
                    name: param.to_dict() for name, param in func.parameters.items()
                },
                "required": [
                    name for name, param in func.parameters.items() if param.required
                ],
            },
        }

    def make_tool_choice_arg(self, name: str) -> dict:
        return {
            "tool_choice": {
                "type": "function",
                "function": {"name": name},
            }
        }

    def make_model_arg(self, model_name: str) -> dict:
        return {"model": model_name}

    async def chat(
        self, messages: list[ChatMessage], *_, **llm_kwargs
    ) -> AsyncCompletions:

        # Example : LangChain Client
        from langchain_community.callbacks import get_openai_callback, OpenAICallbackHandler
        from langchain_openai import ChatOpenAI
        self.llm_model = ChatOpenAI()  # model="gpt-3.5-turbo", temperature=0.5
        with get_openai_callback() as callback:
            self.callback: OpenAICallbackHandler = callback
            return await self.llm_model.ainvoke(input=messages, **llm_kwargs)

        # Example : OAClient :
        # aclient = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"])
        # self.llm_model = aclient.chat
        # return await aclient.chat.completions.create( messages=messages, **llm_kwargs )
diff --git a/AFAAS/core/adapters/openai/configuration.py b/AFAAS/core/adapters/openai/configuration.py
new file mode 100644
index 00000000000..1b4b9d701c4
--- /dev/null
+++ b/AFAAS/core/adapters/openai/configuration.py
@@ -0,0 +1,158 @@
import enum
import math
import os
from typing import Callable, ClassVar, ParamSpec, TypeVar

from openai import AsyncOpenAI

from AFAAS.configs.schema import Field
from AFAAS.interfaces.adapters.chatmodel import (
    ChatModelInfo,
)
from AFAAS.interfaces.adapters.chatmodel.chatmessage import AbstractChatMessage, AbstractRoleLabels
from AFAAS.interfaces.adapters.language_model import (
    AbstractPromptConfiguration,
    BaseModelProviderBudget,
    BaseModelProviderConfiguration,
    BaseModelProviderCredentials,
    BaseModelProviderSettings,
    BaseModelProviderUsage,
    ModelProviderName,
    ModelProviderService,
)
from AFAAS.lib.sdk.logger import AFAASLogger

LOG = AFAASLogger(name=__name__)

_T = TypeVar("_T")
_P = ParamSpec("_P")


class OpenAIRoleLabel(AbstractRoleLabels):
    USER: str = "user"
    SYSTEM: str = "system"
    ASSISTANT: str = "assistant"

    FUNCTION: str = "function"
    """May be used for the return value of function calls"""


class OpenAIChatMessage(AbstractChatMessage):
    _role_labels: ClassVar[OpenAIRoleLabel] = OpenAIRoleLabel()


class OpenAIModelName(str, enum.Enum):
    # TODO : Remove
    ADA = "text-embedding-ada-002"
    GPT3 = "gpt-3.5-turbo"
    GPT3_16k = "gpt-3.5-turbo"
    GPT3_FINE_TUNED = "gpt-3.5-turbo"
    GPT4 = "gpt-3.5-turbo"
    GPT4_32k = "gpt-3.5-turbo"


OPEN_AI_CHAT_MODELS = {
    # TODO : USEFUL FOR AGPT BUDGET MANAGEMENT
    info.name: info
    for info in [
        ChatModelInfo(
            name=OpenAIModelName.GPT3,
            service=ModelProviderService.CHAT,
            provider_name=ModelProviderName.OPENAI,
            prompt_token_cost=0.0015 / 1000,
            completion_token_cost=0.002 / 1000,
            max_tokens=4096,
            has_function_call_api=True,
        ),
        ChatModelInfo(
            name=OpenAIModelName.GPT3_16k,
            service=ModelProviderService.CHAT,
            provider_name=ModelProviderName.OPENAI,
            prompt_token_cost=0.003 / 1000,
            completion_token_cost=0.004 / 1000,
            max_tokens=16384,
            has_function_call_api=True,
        ),
        ChatModelInfo(
            name=OpenAIModelName.GPT3_FINE_TUNED,
            service=ModelProviderService.CHAT,
            provider_name=ModelProviderName.OPENAI,
            prompt_token_cost=0.0120 / 1000,
            completion_token_cost=0.0160 / 1000,
            max_tokens=4096,
            has_function_call_api=True,
        ),
        ChatModelInfo(
            name=OpenAIModelName.GPT4,
            service=ModelProviderService.CHAT,
            provider_name=ModelProviderName.OPENAI,
            prompt_token_cost=0.03 / 1000,
            completion_token_cost=0.06 / 1000,
            max_tokens=8191,
            has_function_call_api=True,
        ),
        ChatModelInfo(
            name=OpenAIModelName.GPT4_32k,
            service=ModelProviderService.CHAT,
            provider_name=ModelProviderName.OPENAI,
            prompt_token_cost=0.06 / 1000,
            completion_token_cost=0.12 / 1000,
            max_tokens=32768,
            has_function_call_api=True,
        ),
    ]
}


OPEN_AI_MODELS = {
    **OPEN_AI_CHAT_MODELS,
}


class OpenAIProviderConfiguration(BaseModelProviderConfiguration):
    ...

class OpenAIModelProviderBudget(BaseModelProviderBudget):
    graceful_shutdown_threshold: float = Field(default=0.005)
    warning_threshold: float = Field(default=0.01)

    total_budget: float = math.inf
    total_cost: float = 0.0
    remaining_budget: float = math.inf
    usage: BaseModelProviderUsage = BaseModelProviderUsage()


class OpenAISettings(BaseModelProviderSettings):
    configuration: OpenAIProviderConfiguration = OpenAIProviderConfiguration()
    credentials: BaseModelProviderCredentials = BaseModelProviderCredentials()
    budget: OpenAIModelProviderBudget = OpenAIModelProviderBudget()
    name: str = "chat_model_provider"
    description: str = "Provides access to OpenAI's API."


class OpenAIPromptConfiguration(AbstractPromptConfiguration):
    ...

class OPEN_AI_DEFAULT_CHAT_CONFIGS:
    # TODO : Can be removed
    FAST_MODEL_4K = OpenAIPromptConfiguration(
        llm_model_name=OpenAIModelName.GPT3,
        temperature=0.7,
    )
    FAST_MODEL_16K = OpenAIPromptConfiguration(
        llm_model_name=OpenAIModelName.GPT3_16k,
        temperature=0.7,
    )
    FAST_MODEL_FINE_TUNED_4K = OpenAIPromptConfiguration(
        llm_model_name=OpenAIModelName.GPT3_FINE_TUNED,
        temperature=0.7,
    )
    SMART_MODEL_8K = OpenAIPromptConfiguration(
        llm_model_name=OpenAIModelName.GPT4,
        temperature=0.7,
    )
    SMART_MODEL_32K = OpenAIPromptConfiguration(
        llm_model_name=OpenAIModelName.GPT4_32k,
        temperature=0.7,
    )

From 4b96c1c1c4c3390ca684d47907a75951cb37671f Mon Sep 17 00:00:00 2001
From: ph-ausseil
Date: Fri, 23 Feb 2024 16:26:20 +0100
Subject: [PATCH 3/7] INIT
---
 {AFAAS/core => autogpt/core/adapter}/adapters/openai/chatmodel.py | 0
 .../core/adapter}/adapters/openai/configuration.py | 0
 {AFAAS/core/agents => autogpt/core/agent}/prompt_manager.py | 0
 {AFAAS/interfaces/prompts => autogpt/core/prompt}/strategy.py | 0
 .../interfaces/adapters}/adapters/chatmodel/__init__.py | 0
 .../interfaces/adapters}/adapters/chatmodel/chatmessage.py | 0
 .../interfaces/adapters}/adapters/chatmodel/chatmodel.py | 0
 .../interfaces/adapters}/adapters/chatmodel/wrapper.py | 0
 .../interfaces/adapters}/adapters/configuration.py | 0
 .../interfaces/adapters}/adapters/language_model.py | 0
 10 files changed, 0 insertions(+), 0 deletions(-)
 rename {AFAAS/core => autogpt/core/adapter}/adapters/openai/chatmodel.py (100%)
 rename {AFAAS/core => autogpt/core/adapter}/adapters/openai/configuration.py (100%)
 rename {AFAAS/core/agents => autogpt/core/agent}/prompt_manager.py (100%)
 rename {AFAAS/interfaces/prompts => autogpt/core/prompt}/strategy.py (100%)
 rename {AFAAS/interfaces => autogpt/interfaces/adapters}/adapters/chatmodel/__init__.py (100%)
 rename {AFAAS/interfaces => autogpt/interfaces/adapters}/adapters/chatmodel/chatmessage.py (100%)
 rename {AFAAS/interfaces => autogpt/interfaces/adapters}/adapters/chatmodel/chatmodel.py (100%)
rename {AFAAS/interfaces => autogpt/interfaces/adapters}/adapters/chatmodel/wrapper.py (100%) rename {AFAAS/interfaces => autogpt/interfaces/adapters}/adapters/configuration.py (100%) rename {AFAAS/interfaces => autogpt/interfaces/adapters}/adapters/language_model.py (100%) diff --git a/AFAAS/core/adapters/openai/chatmodel.py b/autogpt/core/adapter/adapters/openai/chatmodel.py similarity index 100% rename from AFAAS/core/adapters/openai/chatmodel.py rename to autogpt/core/adapter/adapters/openai/chatmodel.py diff --git a/AFAAS/core/adapters/openai/configuration.py b/autogpt/core/adapter/adapters/openai/configuration.py similarity index 100% rename from AFAAS/core/adapters/openai/configuration.py rename to autogpt/core/adapter/adapters/openai/configuration.py diff --git a/AFAAS/core/agents/prompt_manager.py b/autogpt/core/agent/prompt_manager.py similarity index 100% rename from AFAAS/core/agents/prompt_manager.py rename to autogpt/core/agent/prompt_manager.py diff --git a/AFAAS/interfaces/prompts/strategy.py b/autogpt/core/prompt/strategy.py similarity index 100% rename from AFAAS/interfaces/prompts/strategy.py rename to autogpt/core/prompt/strategy.py diff --git a/AFAAS/interfaces/adapters/chatmodel/__init__.py b/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py similarity index 100% rename from AFAAS/interfaces/adapters/chatmodel/__init__.py rename to autogpt/interfaces/adapters/adapters/chatmodel/__init__.py diff --git a/AFAAS/interfaces/adapters/chatmodel/chatmessage.py b/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py similarity index 100% rename from AFAAS/interfaces/adapters/chatmodel/chatmessage.py rename to autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py diff --git a/AFAAS/interfaces/adapters/chatmodel/chatmodel.py b/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py similarity index 100% rename from AFAAS/interfaces/adapters/chatmodel/chatmodel.py rename to autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py diff --git a/AFAAS/interfaces/adapters/chatmodel/wrapper.py b/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py similarity index 100% rename from AFAAS/interfaces/adapters/chatmodel/wrapper.py rename to autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py diff --git a/AFAAS/interfaces/adapters/configuration.py b/autogpt/interfaces/adapters/adapters/configuration.py similarity index 100% rename from AFAAS/interfaces/adapters/configuration.py rename to autogpt/interfaces/adapters/adapters/configuration.py diff --git a/AFAAS/interfaces/adapters/language_model.py b/autogpt/interfaces/adapters/adapters/language_model.py similarity index 100% rename from AFAAS/interfaces/adapters/language_model.py rename to autogpt/interfaces/adapters/adapters/language_model.py From 75549a07908c6796e00cd5674a435874d5863f9b Mon Sep 17 00:00:00 2001 From: ph-ausseil Date: Fri, 23 Feb 2024 18:08:46 +0100 Subject: [PATCH 4/7] Move files & imports --- .../autogpt}/adapters/openai/chatmodel.py | 6 +- .../autogpt}/adapters/openai/configuration.py | 4 +- .../prompt_strategies}/prompt_manager.py | 6 +- .../agents/prompt_strategies}/strategy.py | 7 +- .../adapters/adapters/chatmodel/__init__.py | 0 .../adapters/chatmodel/chatmessage.py | 4 +- .../adapters/adapters/chatmodel/chatmodel.py | 6 +- .../adapters/adapters/chatmodel/wrapper.py | 0 .../adapters/adapters/configuration.py | 0 .../adapters/adapters/language_model.py | 4 +- autogpts/autogpt/autogpt/interfaces/utils.py | 97 +++++++++++++++++++ autogpts/autogpt/tests/vcr_cassettes | 2 +- 12 
files changed, 117 insertions(+), 19 deletions(-) rename {autogpt/core/adapter => autogpts/autogpt/autogpt}/adapters/openai/chatmodel.py (98%) rename {autogpt/core/adapter => autogpts/autogpt/autogpt}/adapters/openai/configuration.py (98%) rename {autogpt/core/agent => autogpts/autogpt/autogpt/agents/prompt_strategies}/prompt_manager.py (98%) rename {autogpt/core/prompt => autogpts/autogpt/autogpt/agents/prompt_strategies}/strategy.py (98%) rename {autogpt => autogpts/autogpt/autogpt}/interfaces/adapters/adapters/chatmodel/__init__.py (100%) rename {autogpt => autogpts/autogpt/autogpt}/interfaces/adapters/adapters/chatmodel/chatmessage.py (97%) rename {autogpt => autogpts/autogpt/autogpt}/interfaces/adapters/adapters/chatmodel/chatmodel.py (98%) rename {autogpt => autogpts/autogpt/autogpt}/interfaces/adapters/adapters/chatmodel/wrapper.py (100%) rename {autogpt => autogpts/autogpt/autogpt}/interfaces/adapters/adapters/configuration.py (100%) rename {autogpt => autogpts/autogpt/autogpt}/interfaces/adapters/adapters/language_model.py (98%) create mode 100644 autogpts/autogpt/autogpt/interfaces/utils.py diff --git a/autogpt/core/adapter/adapters/openai/chatmodel.py b/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py similarity index 98% rename from autogpt/core/adapter/adapters/openai/chatmodel.py rename to autogpts/autogpt/autogpt/adapters/openai/chatmodel.py index fd56667f589..fcedb9664b3 100644 --- a/autogpt/core/adapter/adapters/openai/chatmodel.py +++ b/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py @@ -4,7 +4,7 @@ import tiktoken from openai.resources import AsyncCompletions -from AFAAS.core.adapters.openai.configuration import ( +from autogpt.adapters.openai.configuration import ( OPEN_AI_CHAT_MODELS, OPEN_AI_DEFAULT_CHAT_CONFIGS, OPEN_AI_MODELS, @@ -22,9 +22,9 @@ CompletionModelFunction, ) from AFAAS.interfaces.adapters.language_model import ModelTokenizer, BaseModelResponse -from AFAAS.lib.sdk.logger import AFAASLogger +import logging -LOG = AFAASLogger(name=__name__) +LOG = logging.getLogger(__name__) _T = TypeVar("_T") _P = ParamSpec("_P") diff --git a/autogpt/core/adapter/adapters/openai/configuration.py b/autogpts/autogpt/autogpt/adapters/openai/configuration.py similarity index 98% rename from autogpt/core/adapter/adapters/openai/configuration.py rename to autogpts/autogpt/autogpt/adapters/openai/configuration.py index 1b4b9d701c4..72f8ebf22e6 100644 --- a/autogpt/core/adapter/adapters/openai/configuration.py +++ b/autogpts/autogpt/autogpt/adapters/openai/configuration.py @@ -20,9 +20,9 @@ ModelProviderName, ModelProviderService, ) -from AFAAS.lib.sdk.logger import AFAASLogger +import logging -LOG = AFAASLogger(name=__name__) +LOG = logging.getLogger(__name__) _T = TypeVar("_T") _P = ParamSpec("_P") diff --git a/autogpt/core/agent/prompt_manager.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py similarity index 98% rename from autogpt/core/agent/prompt_manager.py rename to autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py index 5fdbb870563..cb3a47c3ec0 100644 --- a/autogpt/core/agent/prompt_manager.py +++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py @@ -25,9 +25,9 @@ AbstractChatModelResponse, ) from AFAAS.interfaces.adapters.chatmodel.wrapper import ChatCompletionKwargs, ChatModelWrapper -from AFAAS.lib.sdk.logger import AFAASLogger -from AFAAS.core.adapters.openai.chatmodel import AFAASChatOpenAI -LOG = AFAASLogger(name=__name__) +import logging +from autogpt.adapters.openai.chatmodel import 
AFAASChatOpenAI +LOG = logging.getLogger(__name__) # FIXME: Find somewhere more appropriate diff --git a/autogpt/core/prompt/strategy.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py similarity index 98% rename from autogpt/core/prompt/strategy.py rename to autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py index af0677dcab4..a06b3ae3fa4 100644 --- a/autogpt/core/prompt/strategy.py +++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py @@ -32,10 +32,11 @@ to_numbered_list, to_string_list, ) -from AFAAS.lib.utils.json_schema import JSONSchema -from AFAAS.lib.sdk.logger import AFAASLogger +from autogpt.core.utils.json_schema import JSONSchema -LOG = AFAASLogger(name=__name__) +import logging + +LOG = logging.getLogger(__name__) RESPONSE_SCHEMA = JSONSchema( type=JSONSchema.Type.OBJECT, properties={ diff --git a/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py similarity index 100% rename from autogpt/interfaces/adapters/adapters/chatmodel/__init__.py rename to autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py diff --git a/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py similarity index 97% rename from autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py rename to autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py index 7e271d88a0d..47b3b3b83fd 100644 --- a/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py @@ -3,10 +3,10 @@ import enum from typing import ClassVar, Optional, Literal from pydantic import BaseModel -from AFAAS.lib.sdk.logger import AFAASLogger +import logging from langchain_core.messages import ChatMessage, HumanMessage, SystemMessage, AIMessage, FunctionMessage -LOG = AFAASLogger(name=__name__) +LOG = logging.getLogger(__name__) class AbstractRoleLabels(abc.ABC, BaseModel): USER: str diff --git a/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py similarity index 98% rename from autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py rename to autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py index 71ea884a1ad..b6c2477e9f4 100644 --- a/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py @@ -22,13 +22,13 @@ ModelProviderService, AbstractPromptConfiguration, ) -from AFAAS.lib.utils.json_schema import JSONSchema +from autogpt.core.utils.json_schema import JSONSchema from openai import APIError, RateLimitError from openai.resources import AsyncCompletions -from AFAAS.lib.sdk.logger import AFAASLogger -LOG = AFAASLogger(name=__name__) +import logging +LOG = logging.getLogger(__name__) from langchain_core.messages import ChatMessage from AFAAS.interfaces.adapters.chatmodel.chatmessage import AssistantChatMessage diff --git a/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py similarity index 100% rename from autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py rename to autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py diff --git a/autogpt/interfaces/adapters/adapters/configuration.py 
b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py similarity index 100% rename from autogpt/interfaces/adapters/adapters/configuration.py rename to autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py diff --git a/autogpt/interfaces/adapters/adapters/language_model.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py similarity index 98% rename from autogpt/interfaces/adapters/adapters/language_model.py rename to autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py index 3060fbed8f7..6db843ea50b 100644 --- a/autogpt/interfaces/adapters/adapters/language_model.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py @@ -5,7 +5,7 @@ from typing import Callable, ClassVar, Protocol, Optional, Any from pydantic import ConfigDict, BaseModel, Field -from AFAAS.lib.sdk.logger import AFAASLogger +import logging from AFAAS.configs.schema import SystemConfiguration, Field from AFAAS.interfaces.adapters.configuration import ( @@ -15,7 +15,7 @@ BaseProviderUsage, ) -LOG = AFAASLogger(name=__name__) +LOG = logging.getLogger(__name__) class ModelProviderService(str, enum.Enum): """A ModelService describes what kind of service the model provides.""" diff --git a/autogpts/autogpt/autogpt/interfaces/utils.py b/autogpts/autogpt/autogpt/interfaces/utils.py new file mode 100644 index 00000000000..1399874e2e8 --- /dev/null +++ b/autogpts/autogpt/autogpt/interfaces/utils.py @@ -0,0 +1,97 @@ +import ast +import json +import re + +from AFAAS.lib.sdk.logger import AFAASLogger + +LOG = AFAASLogger(name=__name__) + + +def to_numbered_list( + items: list[str], no_items_response: str = "", **template_args +) -> str: + if items: + return "\n".join( + f"{i+1}. {item.format(**template_args)}" for i, item in enumerate(items) + ) + else: + return no_items_response + + +def indent(content: str, indentation: int | str = 4) -> str: + if isinstance(indentation, int): + indentation = " " * indentation + return indentation + content.replace("\n", f"\n{indentation}") + + +def to_dotted_list( + items: list[str], no_items_response: str = "", **template_args +) -> str: + if items: + return "\n".join( + f" - {item.format(**template_args)}" for item in items + ) + else: + return no_items_response + + +def to_string_list(string_list) -> str: + if not string_list: + raise ValueError("Input list cannot be empty") + + formatted_string = ", ".join(string_list[:-1]) + ", and " + string_list[-1] if len(string_list) > 1 else string_list[0] + return formatted_string + + +def to_md_quotation(text): + """ + Transforms a given string into a Markdown blockquote. + + Parameters: + text (str): The string to be transformed. + + Returns: + str: The transformed string as a Markdown blockquote. + """ + # Split the text into lines + lines = text.split("\n") + + # Prefix each line with "> " + quoted_lines = [f"> {line}" for line in lines] + + # Join the lines back into a single string + quoted_text = "\n".join(quoted_lines) + + return quoted_text + + +def json_loads(json_str: str): + # TODO: this is a hack function for now. Trying to see what errors show up in testing. + # Can hopefully just replace with a call to ast.literal_eval (the function api still + # sometimes returns json strings with minor issues like trailing commas). + + try: + json_str = json_str[json_str.index("{") : json_str.rindex("}") + 1] + return ast.literal_eval(json_str) + except Exception as e: + LOG.warning(f"First attempt failed: {e}. 
Trying json.loads()") + try: + return json.loads(json_str) + except Exception as e: + try: + LOG.warning(f"JSON decode error {e}. Trying literal eval") + + def replacer(match): + # Escape newlines in the matched value + return match.group(0).replace("\n", "\\n").replace("\t", "\\t") + + # Find string values and apply the replacer function to each + json_str = re.sub(r'".+?"', replacer, json_str) + return ast.literal_eval(json_str) + + # NOTE: BACKUP PLAN : + # json_str = escape_backslashes_in_json_values(json_str) # DOUBLE BACKSLASHES + # return_json_value = ast.literal_eval(json_str) + # return remove_double_backslashes(return_json_value) # CONVERT DOUBLE BACKSLASHES BACK + except Exception: + breakpoint() diff --git a/autogpts/autogpt/tests/vcr_cassettes b/autogpts/autogpt/tests/vcr_cassettes index 03bf1eb089c..1896d8ac12f 160000 --- a/autogpts/autogpt/tests/vcr_cassettes +++ b/autogpts/autogpt/tests/vcr_cassettes @@ -1 +1 @@ -Subproject commit 03bf1eb089c19217a08ec97e814b6a3b65b4affc +Subproject commit 1896d8ac12ff1d27b7e9e5db6549abc38b260b40 From 781d6469eeb36b854c173836b7b9aa1495624417 Mon Sep 17 00:00:00 2001 From: ph-ausseil Date: Fri, 23 Feb 2024 18:18:43 +0100 Subject: [PATCH 5/7] Change imports --- .../autogpt/adapters/openai/chatmodel.py | 6 +++--- .../autogpt/adapters/openai/configuration.py | 8 ++++---- .../prompt_strategies/prompt_manager.py | 20 +++++++++---------- .../agents/prompt_strategies/strategy.py | 14 ++++++------- .../adapters/adapters/chatmodel/__init__.py | 6 +++--- .../adapters/adapters/chatmodel/chatmodel.py | 4 ++-- .../adapters/adapters/chatmodel/wrapper.py | 6 +++--- .../adapters/adapters/configuration.py | 3 ++- .../adapters/adapters/language_model.py | 5 +++-- 9 files changed, 37 insertions(+), 35 deletions(-) diff --git a/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py b/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py index fcedb9664b3..eb31bbea8db 100644 --- a/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py +++ b/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py @@ -14,14 +14,14 @@ ) from langchain_core.messages import AIMessage , ChatMessage -from AFAAS.configs.schema import Configurable -from AFAAS.interfaces.adapters.chatmodel.chatmodel import ( +from autogpt.core.configuration import Configurable +from autogpt.interfaces.adapters.chatmodel.chatmodel import ( AbstractChatModelProvider, AbstractChatModelResponse, AssistantChatMessage, CompletionModelFunction, ) -from AFAAS.interfaces.adapters.language_model import ModelTokenizer, BaseModelResponse +from autogpt.interfaces.adapters.language_model import ModelTokenizer, BaseModelResponse import logging LOG = logging.getLogger(__name__) diff --git a/autogpts/autogpt/autogpt/adapters/openai/configuration.py b/autogpts/autogpt/autogpt/adapters/openai/configuration.py index 72f8ebf22e6..a46e763dfd6 100644 --- a/autogpts/autogpt/autogpt/adapters/openai/configuration.py +++ b/autogpts/autogpt/autogpt/adapters/openai/configuration.py @@ -5,12 +5,12 @@ from openai import AsyncOpenAI -from AFAAS.configs.schema import Field -from AFAAS.interfaces.adapters.chatmodel import ( +from autogpt.core.configuration import Field +from autogpt.interfaces.adapters.chatmodel import ( ChatModelInfo, ) -from AFAAS.interfaces.adapters.chatmodel.chatmessage import AbstractChatMessage, AbstractRoleLabels -from AFAAS.interfaces.adapters.language_model import ( +from autogpt.interfaces.adapters.chatmodel.chatmessage import AbstractChatMessage, AbstractRoleLabels +from autogpt.interfaces.adapters.language_model import (
AbstractPromptConfiguration, BaseModelProviderBudget, BaseModelProviderConfiguration, diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py index cb3a47c3ec0..73cbb1daf46 100644 --- a/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py +++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py @@ -6,25 +6,25 @@ from pydantic import BaseModel, ConfigDict from typing import TYPE_CHECKING, Any -from AFAAS.interfaces.agent.assistants.prompt_manager import AbstractPromptManager , LLMConfig +from autogpt.interfaces.agent.assistants.prompt_manager import AbstractPromptManager , LLMConfig from AFAAS.prompts import BaseTaskRagStrategy, load_all_strategies -from AFAAS.interfaces.adapters.language_model import AbstractPromptConfiguration -from AFAAS.interfaces.adapters.chatmodel.chatmodel import ChatPrompt -from AFAAS.interfaces.agent.features.agentmixin import AgentMixin -from AFAAS.interfaces.prompts.strategy import AbstractPromptStrategy +from autogpt.interfaces.adapters.language_model import AbstractPromptConfiguration +from autogpt.interfaces.adapters.chatmodel.chatmodel import ChatPrompt +from autogpt.core.configuration import SystemConfiguration +from autogpt.interfaces.prompts.strategy import AbstractPromptStrategy if TYPE_CHECKING: - from AFAAS.interfaces.prompts.strategy import ( + from autogpt.interfaces.prompts.strategy import ( AbstractPromptStrategy) - from AFAAS.interfaces.agent.main import BaseAgent + from autogpt.interfaces.agent.main import BaseAgent -from AFAAS.interfaces.adapters.chatmodel import ( +from autogpt.interfaces.adapters.chatmodel import ( AbstractChatModelProvider, AbstractChatModelResponse, ) -from AFAAS.interfaces.adapters.chatmodel.wrapper import ChatCompletionKwargs, ChatModelWrapper +from autogpt.interfaces.adapters.chatmodel.wrapper import ChatCompletionKwargs, ChatModelWrapper import logging from autogpt.adapters.openai.chatmodel import AFAASChatOpenAI LOG = logging.getLogger(__name__) @@ -37,7 +37,7 @@ class SystemInfo(dict): api_budget: float current_time: str -class BasePromptManager(AgentMixin, AbstractPromptManager): +class BasePromptManager( AbstractPromptManager): def __init__( self, diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py index a06b3ae3fa4..6820f674c3f 100644 --- a/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py +++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py @@ -10,21 +10,21 @@ from langchain_core.messages import ChatMessage, AIMessage -from AFAAS.configs.schema import SystemConfiguration +from autogpt.core.configuration import SystemConfiguration if TYPE_CHECKING: - from AFAAS.interfaces.task.task import AbstractTask + from autogpt.interfaces.task.task import AbstractTask -from AFAAS.interfaces.agent.features.agentmixin import AgentMixin -from AFAAS.interfaces.adapters.language_model import AbstractLanguageModelProvider, AbstractPromptConfiguration -from AFAAS.interfaces.adapters.chatmodel import ( +from autogpt.core.configuration import SystemConfiguration +from autogpt.interfaces.adapters.language_model import AbstractLanguageModelProvider, AbstractPromptConfiguration +from autogpt.interfaces.adapters.chatmodel import ( ChatPrompt, AbstractChatModelProvider, AbstractChatModelResponse, AssistantChatMessage, CompletionModelFunction, ) -from AFAAS.interfaces.prompts.utils.utils import ( +from 
autogpt.interfaces.utils import ( indent, json_loads, to_dotted_list, @@ -96,7 +96,7 @@ class PromptStrategiesConfiguration(SystemConfiguration): # presence_penalty: Optional[float] = None # Avoid certain subjects -class AbstractPromptStrategy(AgentMixin, abc.ABC): +class AbstractPromptStrategy( abc.ABC): STRATEGY_NAME: str default_configuration: PromptStrategiesConfiguration diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py index dd5f4753bfe..a5173f2e51e 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py @@ -1,5 +1,5 @@ from __future__ import annotations -from AFAAS.interfaces.adapters.chatmodel.chatmodel import ( +from autogpt.interfaces.adapters.chatmodel.chatmodel import ( AbstractChatModelProvider, AbstractChatModelResponse, ChatModelInfo, @@ -8,7 +8,7 @@ ) -from AFAAS.interfaces.adapters.chatmodel.chatmessage import ( +from autogpt.interfaces.adapters.chatmodel.chatmessage import ( AbstractChatMessage, AIMessage, ChatMessage, @@ -20,7 +20,7 @@ AssistantFunctionCall, ) -from AFAAS.interfaces.adapters.chatmodel.wrapper import ( +from autogpt.interfaces.adapters.chatmodel.wrapper import ( ChatCompletionKwargs, ChatModelWrapper, ) diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py index b6c2477e9f4..186288298ca 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py @@ -15,7 +15,7 @@ from langchain_core.language_models.chat_models import BaseChatModel -from AFAAS.interfaces.adapters.language_model import ( +from autogpt.interfaces.adapters.language_model import ( AbstractLanguageModelProvider, BaseModelInfo, BaseModelResponse, @@ -31,7 +31,7 @@ LOG = logging.getLogger(__name__) from langchain_core.messages import ChatMessage -from AFAAS.interfaces.adapters.chatmodel.chatmessage import AssistantChatMessage +from autogpt.interfaces.adapters.chatmodel.chatmessage import AssistantChatMessage class CompletionModelFunction(BaseModel): diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py index bff24b70253..837a3a31a20 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py @@ -1,13 +1,13 @@ from pydantic import BaseModel -from AFAAS.interfaces.adapters.chatmodel.chatmodel import LOG, _RetryHandler, AbstractChatModelProvider, AbstractChatModelResponse, CompletionModelFunction -from AFAAS.interfaces.adapters.language_model import AbstractPromptConfiguration +from autogpt.interfaces.adapters.chatmodel.chatmodel import LOG, _RetryHandler, AbstractChatModelProvider, AbstractChatModelResponse, CompletionModelFunction +from autogpt.interfaces.adapters.language_model import AbstractPromptConfiguration from typing import ( Callable, TypeVar, Optional ) -from AFAAS.interfaces.adapters.chatmodel.chatmessage import AssistantChatMessage +from autogpt.interfaces.adapters.chatmodel.chatmessage import AssistantChatMessage from langchain_core.language_models.chat_models import BaseChatModel from langchain_core.messages import 
ChatMessage diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py index f56d679b934..5bee99b0b84 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py @@ -3,7 +3,8 @@ from pydantic import BaseModel, SecretBytes, SecretStr from AFAAS.configs.config import SystemSettings, UserConfigurable -from AFAAS.configs.schema import SystemConfiguration, update_model_config +from autogpt.core.configuration import SystemConfiguration +from autogpt.core.configuration import update_model_config from pydantic import ConfigDict from pydantic.fields import Field diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py index 6db843ea50b..57f923d88c7 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py @@ -7,8 +7,9 @@ from typing import Callable, ClassVar, Protocol, Optional, Any from pydantic import ConfigDict, BaseModel, Field import logging -from AFAAS.configs.schema import SystemConfiguration, Field -from AFAAS.interfaces.adapters.configuration import ( +from autogpt.core.configuration import SystemConfiguration +, Field +from autogpt.interfaces.adapters.configuration import ( BaseProviderBudget, BaseProviderCredentials, BaseProviderSettings, From 40181cd21ad7459900b5c5f9aeabaf590c3f03ea Mon Sep 17 00:00:00 2001 From: ph-ausseil Date: Sat, 24 Feb 2024 11:49:14 +0100 Subject: [PATCH 7/7] Remove has_oa_function_call_api * Remove has_oa_function_call_api * Create AbstractChatModelProvider.llm_api_client for dependency injection of the client * Improve defaulting mechanism when looking for a model name --- .../autogpt/adapters/openai/chatmodel.py | 49 ++++---- .../autogpt/adapters/openai/configuration.py | 17 +-- .../prompt_strategies/prompt_manager.py | 10 +- .../agents/prompt_strategies/strategy.py | 21 +--- .../{adapters => }/chatmodel/__init__.py | 0 .../{adapters => }/chatmodel/chatmessage.py | 0 .../{adapters => }/chatmodel/chatmodel.py | 52 +++++--- .../{adapters => }/chatmodel/wrapper.py | 3 - .../adapters/{adapters => }/configuration.py | 0 .../adapters/{adapters => }/language_model.py | 117 +++++++++--------- 10 files changed, 134 insertions(+), 135 deletions(-) rename autogpts/autogpt/autogpt/interfaces/adapters/{adapters => }/chatmodel/__init__.py (100%) rename autogpts/autogpt/autogpt/interfaces/adapters/{adapters => }/chatmodel/chatmessage.py (100%) rename autogpts/autogpt/autogpt/interfaces/adapters/{adapters => }/chatmodel/chatmodel.py (83%) rename autogpts/autogpt/autogpt/interfaces/adapters/{adapters => }/chatmodel/wrapper.py (98%) rename autogpts/autogpt/autogpt/interfaces/adapters/{adapters => }/configuration.py (100%) rename 
autogpts/autogpt/autogpt/interfaces/adapters/{adapters => }/language_model.py (83%) diff --git a/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py b/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py index eb31bbea8db..e9c6e47f452 100644 --- a/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py +++ b/autogpts/autogpt/autogpt/adapters/openai/chatmodel.py @@ -1,5 +1,5 @@ import os -from typing import Any, Callable, Dict, ParamSpec, Tuple, TypeVar +from typing import Any, Callable, Dict, ParamSpec, Tuple, TypeVar, Optional import tiktoken from openai.resources import AsyncCompletions @@ -21,7 +21,7 @@ AssistantChatMessage, CompletionModelFunction, ) -from autogpt.interfaces.adapters.language_model import ModelTokenizer, BaseModelResponse +from autogpt.interfaces.adapters.language_model import ModelTokenizer, LanguageModelResponse import logging LOG = logging.getLogger(__name__) @@ -29,20 +29,29 @@ _T = TypeVar("_T") _P = ParamSpec("_P") +# Example : LangChain Client +from langchain_openai import ChatOpenAI +from langchain_community.callbacks import get_openai_callback, OpenAICallbackHandler -class AFAASChatOpenAI(Configurable[OpenAISettings], AbstractChatModelProvider): +# Example : OAClient : +# from openai import AsyncOpenAI - def __llmmodel_default__(self) : - return "gpt-3.5-turbo" +class ChatOpenAIAdapter(Configurable[OpenAISettings], AbstractChatModelProvider): - def __llmmodel_cheap__(self) : - return "gpt-3.5-turbo" + # Example : LangChain Client + callback : Optional[OpenAICallbackHandler] = None + llm_api_client = ChatOpenAI() + + # Example : OAClient : + # llm_api_client = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"]) + + llmmodel_default : str = "gpt-3.5-turbo" + llmmodel_fine_tuned : str = "gpt-3.5-turbo" + llmmodel_cheap : str = "gpt-3.5-turbo" + llmmodel_code_expert_model : str = "gpt-3.5-turbo" + llmmodel_long_context_model : str = "gpt-3.5-turbo" - def __llmmodel_code_expert_model__(self) : - return "gpt-3.5-turbo" - def __llmmodel_long_context_model__(self) : - return "gpt-3.5-turbo" def __init__( self, @@ -112,7 +121,7 @@ def extract_response_details( self, response: AsyncCompletions, model_name: str ) -> Tuple[Dict[str, Any], Dict[str, Any]]: if (isinstance(response, AsyncCompletions)) : - response_args = BaseModelResponse( + response_args = LanguageModelResponse( llm_model_info=OPEN_AI_CHAT_MODELS[model_name], prompt_tokens=response.usage.prompt_tokens, completion_tokens=response.usage.completion_tokens, @@ -120,7 +129,7 @@ #response_message = response.choices[0].message.model_dump() elif (isinstance(response, AIMessage)) : # AGPT retro compatibility - response_args = BaseModelResponse( + response_args = LanguageModelResponse( llm_model_info=OPEN_AI_CHAT_MODELS[model_name], prompt_tokens= self.callback.prompt_tokens, completion_tokens= self.callback.completion_tokens, @@ -167,9 +176,6 @@ def formulate_final_response( def __repr__(self): return "OpenAIProvider()" - def has_oa_tool_calls_api(self, model_name: str) -> bool: - return True # Always True for OpenAI - def get_default_config(self) -> OpenAIPromptConfiguration: LOG.warning(f"Using {__class__.__name__} default config, we recommend setting individual model configs") return OPEN_AI_DEFAULT_CHAT_CONFIGS.SMART_MODEL_32K @@ -212,16 +218,9 @@ async def chat( self, messages: list[ChatMessage], *_, **llm_kwargs ) -> AsyncCompletions: - # Example : LangChain Client - from langchain_community.callbacks import get_openai_callback, OpenAICallbackHandler - from langchain_openai import 
ChatOpenAI - self.callback : OpenAICallbackHandler - self.llm_model = ChatOpenAI() #model = "gpt-3.5-turbo" , temperature=0.5 ) with get_openai_callback() as callback: self.callback : OpenAICallbackHandler = callback - return await self.llm_model.ainvoke(input = messages , **llm_kwargs) + return await self.llm_api_client.ainvoke(input = messages , **llm_kwargs) # Example : OAClient : - # aclient = AsyncOpenAI(api_key=os.environ["OPENAI_API_KEY"]) - # self.llm_model = aclient.chat - # return await aclient.chat.completions.create( messages=messages, **llm_kwargs ) + # return await self.llm_api_client.chat.completions.create( messages=messages, **llm_kwargs ) diff --git a/autogpts/autogpt/autogpt/adapters/openai/configuration.py b/autogpts/autogpt/autogpt/adapters/openai/configuration.py index a46e763dfd6..9837db7e14c 100644 --- a/autogpts/autogpt/autogpt/adapters/openai/configuration.py +++ b/autogpts/autogpt/autogpt/adapters/openai/configuration.py @@ -12,10 +12,10 @@ from autogpt.interfaces.adapters.chatmodel.chatmessage import AbstractChatMessage, AbstractRoleLabels from autogpt.interfaces.adapters.language_model import ( AbstractPromptConfiguration, - BaseModelProviderBudget, - BaseModelProviderConfiguration, + LanguageModelProviderBudget, + LanguageModelProviderConfiguration, BaseModelProviderCredentials, - BaseModelProviderSettings, + LanguageModelProviderSettings, BaseModelProviderUsage, ModelProviderName, ModelProviderService, @@ -62,7 +62,6 @@ class OpenAIModelName(str, enum.Enum): prompt_token_cost=0.0015 / 1000, completion_token_cost=0.002 / 1000, max_tokens=4096, - has_function_call_api=True, ), ChatModelInfo( name=OpenAIModelName.GPT3_16k, @@ -71,7 +70,6 @@ class OpenAIModelName(str, enum.Enum): prompt_token_cost=0.003 / 1000, completion_token_cost=0.004 / 1000, max_tokens=16384, - has_function_call_api=True, ), ChatModelInfo( name=OpenAIModelName.GPT3_FINE_TUNED, @@ -80,7 +78,6 @@ class OpenAIModelName(str, enum.Enum): prompt_token_cost=0.0120 / 1000, completion_token_cost=0.0160 / 1000, max_tokens=4096, - has_function_call_api=True, ), ChatModelInfo( name=OpenAIModelName.GPT4, @@ -89,7 +86,6 @@ class OpenAIModelName(str, enum.Enum): prompt_token_cost=0.03 / 1000, completion_token_cost=0.06 / 1000, max_tokens=8191, - has_function_call_api=True, ), ChatModelInfo( name=OpenAIModelName.GPT4_32k, @@ -98,7 +94,6 @@ class OpenAIModelName(str, enum.Enum): prompt_token_cost=0.06 / 1000, completion_token_cost=0.12 / 1000, max_tokens=32768, - has_function_call_api=True, ), ] } @@ -109,10 +104,10 @@ class OpenAIModelName(str, enum.Enum): } -class OpenAIProviderConfiguration(BaseModelProviderConfiguration): +class OpenAIProviderConfiguration(LanguageModelProviderConfiguration): ... 
-class OpenAIModelProviderBudget(BaseModelProviderBudget): +class OpenAIModelProviderBudget(LanguageModelProviderBudget): graceful_shutdown_threshold: float = Field(default=0.005) warning_threshold: float = Field(default=0.01) @@ -122,7 +117,7 @@ class OpenAIModelProviderBudget(BaseModelProviderBudget): usage: BaseModelProviderUsage = BaseModelProviderUsage() -class OpenAISettings(BaseModelProviderSettings): +class OpenAISettings(LanguageModelProviderSettings): configuration: OpenAIProviderConfiguration = OpenAIProviderConfiguration() credentials: BaseModelProviderCredentials = BaseModelProviderCredentials() budget: OpenAIModelProviderBudget = OpenAIModelProviderBudget() diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py index 73cbb1daf46..32df0b75b15 100644 --- a/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py +++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/prompt_manager.py @@ -26,7 +26,7 @@ ) from autogpt.interfaces.adapters.chatmodel.wrapper import ChatCompletionKwargs, ChatModelWrapper import logging -from autogpt.adapters.openai.chatmodel import AFAASChatOpenAI +from autogpt.adapters.openai.chatmodel import ChatOpenAIAdapter LOG = logging.getLogger(__name__) @@ -42,10 +42,10 @@ class BasePromptManager( AbstractPromptManager): def __init__( self, config : LLMConfig = LLMConfig( - default = AFAASChatOpenAI(), - cheap = AFAASChatOpenAI(), - long_context = AFAASChatOpenAI(), - code_expert = AFAASChatOpenAI(), + default = ChatOpenAIAdapter(), + cheap = ChatOpenAIAdapter(), + long_context = ChatOpenAIAdapter(), + code_expert = ChatOpenAIAdapter(), ), ) -> None: self._prompt_strategies = {} diff --git a/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py b/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py index 6820f674c3f..36f1eda4e99 100644 --- a/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py +++ b/autogpts/autogpt/autogpt/agents/prompt_strategies/strategy.py @@ -157,17 +157,9 @@ def response_format_instruction( ) -> str: language_model_provider = self.get_llm_provider() model_name = self.get_prompt_config().llm_model_name - use_oa_tools_api = language_model_provider.has_oa_tool_calls_api( - model_name=model_name - ) - response_schema = RESPONSE_SCHEMA.copy(deep=True) - if ( - use_oa_tools_api - and response_schema.properties - and "command" in response_schema.properties - ): - del response_schema.properties["command"] + #response_schema = RESPONSE_SCHEMA.copy(deep=True) + response_schema = RESPONSE_SCHEMA # Unindent for performance response_format: str = re.sub( @@ -176,15 +168,8 @@ def response_format_instruction( response_schema.to_typescript_object_interface("Response"), ) - if use_oa_tools_api: - return ( - f"Respond strictly with a JSON of type `Response` :\n" - f"{response_format}" - ) - return ( - f"Respond strictly with JSON{', and also specify a command to use through a tool_calls' if use_oa_tools_api else ''}. " - "The JSON should be compatible with the TypeScript type `Response` from the following:\n" + f"Respond strictly with JSON. 
The JSON should be compatible with the TypeScript type `Response` from the following:\n" f"{response_format}" ) diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py b/autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/__init__.py similarity index 100% rename from autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/__init__.py rename to autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/__init__.py diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py b/autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/chatmessage.py similarity index 100% rename from autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmessage.py rename to autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/chatmessage.py diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py b/autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/chatmodel.py similarity index 83% rename from autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py rename to autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/chatmodel.py index 186288298ca..2664f79eef0 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/chatmodel.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/chatmodel.py @@ -10,15 +10,17 @@ Optional, TypeVar, ParamSpec, + Union, ) -from pydantic import BaseModel, Field +from pydantic import BaseModel, Field, ConfigDict from langchain_core.language_models.chat_models import BaseChatModel +from openai import AsyncOpenAI from autogpt.interfaces.adapters.language_model import ( AbstractLanguageModelProvider, - BaseModelInfo, - BaseModelResponse, + LanguageModelInfo, + LanguageModelResponse, ModelProviderService, AbstractPromptConfiguration, ) @@ -89,7 +91,7 @@ def __str__(self): _T = TypeVar("_T") -class AbstractChatModelResponse(BaseModelResponse, Generic[_T]): +class AbstractChatModelResponse(LanguageModelResponse, Generic[_T]): response: Optional[AssistantChatMessage] = None parsed_result: _T = None @@ -100,15 +102,25 @@ class AbstractChatModelResponse(BaseModelResponse, Generic[_T]): system_prompt: str = None -class ChatModelInfo(BaseModelInfo): +class ChatModelInfo(LanguageModelInfo): llm_service : ModelProviderService = ModelProviderService.CHAT max_tokens: int - has_function_call_api: bool = False class AbstractChatModelProvider(AbstractLanguageModelProvider): - #llm_model : Optional[BaseChatModel] = None + model_config: ConfigDict = ConfigDict( + extra= "allow", + ) + + llm_api_client : Union [BaseChatModel , AsyncOpenAI , Any] + + llmmodel_default : str + # Role-specific models below fall back to llmmodel_default when left unset + llmmodel_fine_tuned : Optional[str] = None + llmmodel_cheap : Optional[str] = None + llmmodel_code_expert_model : Optional[str] = None + llmmodel_long_context_model : Optional[str] = None @abc.abstractmethod def count_message_tokens( @@ -143,10 +155,6 @@ def make_tools_arg(self, tools : list[CompletionModelFunction]) -> dict: def make_tool_choice_arg(self , name : str) -> dict: ... - @abc.abstractmethod - def has_oa_tool_calls_api(self, model_name: str) -> bool: - ... - @abc.abstractmethod def get_default_config(self) -> AbstractPromptConfiguration: ... 
@@ -177,14 +185,28 @@ def __getattribute__(self, __name: str): return super().__getattribute__(__name) try: - return super().__getattribute__(__name) + model_name = super().__getattribute__(__name) + if model_name is not None: + return model_name except AttributeError: - return self.__llmmodel_default__() + LOG.warning(f"Model name {__name} not found in {self.__class__.__name__} , defaulting to {self.llmmodel_default}") + + return self.__llmmodel_default__() - @abc.abstractmethod def __llmmodel_default__(self) -> str: - ... + return self.llmmodel_default + + def __llmmodel_cheap__(self) -> str: + return self.llmmodel_cheap + + def __llmmodel_code_expert_model__(self) -> str: + return self.llmmodel_code_expert_model + + def __llmmodel_long_context_model__(self) -> str: + return self.llmmodel_long_context_model + def __llmmodel_fine_tuned__(self) -> str: + return self.llmmodel_fine_tuned _P = ParamSpec("_P") diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py b/autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/wrapper.py similarity index 98% rename from autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py rename to autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/wrapper.py index 837a3a31a20..05b3ca19388 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/chatmodel/wrapper.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/chatmodel/wrapper.py @@ -215,9 +215,6 @@ async def _chat( return return_value - def has_oa_tool_calls_api(self, model_name: str) -> bool: - self.llm_adapter.has_oa_tool_calls_api(model_name) - def get_default_config(self) -> AbstractPromptConfiguration: return self.llm_adapter.get_default_config() diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py b/autogpts/autogpt/autogpt/interfaces/adapters/configuration.py similarity index 100% rename from autogpts/autogpt/autogpt/interfaces/adapters/adapters/configuration.py rename to autogpts/autogpt/autogpt/interfaces/adapters/configuration.py diff --git a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py b/autogpts/autogpt/autogpt/interfaces/adapters/language_model.py similarity index 83% rename from autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py rename to autogpts/autogpt/autogpt/interfaces/adapters/language_model.py index 57f923d88c7..a14e1d6c381 100644 --- a/autogpts/autogpt/autogpt/interfaces/adapters/adapters/language_model.py +++ b/autogpts/autogpt/autogpt/interfaces/adapters/language_model.py @@ -8,7 +8,7 @@ import logging from autogpt.core.configuration import SystemConfiguration -, Field + from autogpt.interfaces.adapters.configuration import ( BaseProviderBudget, BaseProviderCredentials, @@ -28,31 +28,19 @@ class ModelProviderService(str, enum.Enum): class ModelProviderName(str, enum.Enum): OPENAI: str = "openai" -class BaseModelInfo(BaseModel): - name: str - service: ModelProviderService - provider_name: ModelProviderName - prompt_token_cost: float = 0.0 - completion_token_cost: float = 0.0 - - -class BaseModelResponse(BaseModel): - prompt_tokens : int - completion_tokens: int - llm_model_info: BaseModelInfo - strategy: Optional[Any] = None # TODO: Should save the strategy used to get the response - def __init__(self, **data: Any): - super().__init__(**data) - LOG.debug(f"BaseModelResponse does not save the strategy") +class AbstractPromptConfiguration(abc.ABC, SystemConfiguration): + """Struct for model configuration.""" + llm_model_name: str = Field() + 
temperature: float = Field() -class BaseModelProviderConfiguration(SystemConfiguration): - extra_request_headers: dict[str, str] = Field(default_factory=dict) - retries_per_request: int = Field(default=10) - maximum_retry: int = 1 - maximum_retry_before_default_function: int = 1 +class ModelTokenizer(Protocol): + @abc.abstractmethod + def encode(self, text: str) -> list: ... + @abc.abstractmethod + def decode(self, tokens: list) -> str: ... class BaseModelProviderCredentials(BaseProviderCredentials): api_key: str | None = Field(default=None) @@ -70,7 +58,7 @@ class BaseModelProviderUsage(BaseProviderUsage): def update_usage( self, - model_response: BaseModelResponse, + model_response: LanguageModelResponse, ) -> None: self.completion_tokens += model_response.completion_tokens self.prompt_tokens += model_response.prompt_tokens @@ -80,8 +68,51 @@ def update_usage( ) +class BaseModelProviderConfiguration(SystemConfiguration): + maximum_retry: int = 1 + +class BaseModelProviderSettings(BaseProviderSettings): + configuration: BaseModelProviderConfiguration + credentials: BaseModelProviderCredentials + budget: BaseProviderBudget + + + +class AbstractModelProvider(abc.ABC): + default_settings: ClassVar[LanguageModelProviderSettings] + + @abc.abstractmethod + def get_remaining_budget(self) -> float: ... + +class LanguageModelInfo(BaseModel): + name: str + service: ModelProviderService + provider_name: ModelProviderName + prompt_token_cost: float = 0.0 + completion_token_cost: float = 0.0 + + +class LanguageModelResponse(BaseModel): + prompt_tokens : int + completion_tokens: int + llm_model_info: LanguageModelInfo + strategy: Optional[Any] = None # TODO: Should save the strategy used to get the response + + def __init__(self, **data: Any): + super().__init__(**data) + LOG.debug("LanguageModelResponse does not save the strategy") -class BaseModelProviderBudget(BaseProviderBudget): +class LanguageModelProviderSettings(BaseModelProviderSettings): + configuration: LanguageModelProviderConfiguration + credentials: BaseModelProviderCredentials + budget: LanguageModelProviderBudget + +class LanguageModelProviderConfiguration(BaseModelProviderConfiguration): + extra_request_headers: dict[str, str] = Field(default_factory=dict) + retries_per_request: int = Field(default=10) + maximum_retry_before_default_function: int = 1 + +class LanguageModelProviderBudget(BaseProviderBudget): total_budget: float = Field() total_cost: float remaining_budget: float @@ -89,31 +120,23 @@ def update_usage_and_cost( self, - model_response: BaseModelResponse, + model_response: LanguageModelResponse, ) -> None: """Update the usage and cost of the provider.""" llm_model_info = model_response.llm_model_info self.usage.update_usage(model_response) incurred_cost = ( model_response.completion_tokens * llm_model_info.completion_token_cost - + model_response.prompt_tokens - * llm_model_info.prompt_token_cost + + model_response.prompt_tokens * llm_model_info.prompt_token_cost ) self.total_cost += incurred_cost if abs(self.remaining_budget) != float("inf"): self.remaining_budget -= incurred_cost -class BaseModelProviderSettings(BaseProviderSettings): - configuration: BaseModelProviderConfiguration - credentials: BaseModelProviderCredentials - budget: BaseModelProviderBudget - - -class AbstractModelProvider(abc.ABC): - default_settings: ClassVar[BaseModelProviderSettings] +class AbstractLanguageModelProvider(AbstractModelProvider): - 
_configuration: LanguageModelProviderConfiguration @abc.abstractmethod def count_tokens(self, text: str, model_name: str) -> int: ... @@ -124,27 +147,5 @@ def get_tokenizer(self, model_name: str) -> "ModelTokenizer": ... @abc.abstractmethod def get_token_limit(self, model_name: str) -> int: ... - @abc.abstractmethod - def get_remaining_budget(self) -> float: ... - - -class AbstractLanguageModelProvider(AbstractModelProvider): - @abc.abstractmethod - def has_oa_tool_calls_api(self, model_name: str) -> bool: ... - @abc.abstractmethod def get_default_config(self) -> AbstractPromptConfiguration: ... - -class ModelTokenizer(Protocol): - @abc.abstractmethod - def encode(self, text: str) -> list: ... - - @abc.abstractmethod - def decode(self, tokens: list) -> str: ... - -class AbstractPromptConfiguration(abc.ABC, SystemConfiguration): - """Struct for model configuration.""" - - llm_model_name: str = Field() - temperature: float = Field() -
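
Reviewer note (not part of the patch series): the sketch below shows how the pieces introduced by PATCH 7/7 are meant to fit together. The exact constructor signatures are not visible in the diffs, so the instantiation and attribute assignments here are assumptions drawn from the fields shown above (AbstractChatModelProvider.llm_api_client, the llmmodel_* fallback fields, and the BasePromptManager/LLMConfig defaults); treat it as an illustrative usage sketch, not a definitive API.

# Usage sketch, illustrative only. Assumes the pydantic models shown in the
# diffs accept plain attribute assignment on their declared fields.
from langchain_openai import ChatOpenAI

from autogpt.adapters.openai.chatmodel import ChatOpenAIAdapter
from autogpt.agents.prompt_strategies.prompt_manager import BasePromptManager
from autogpt.interfaces.agent.assistants.prompt_manager import LLMConfig

# Dependency injection of the API client: any langchain BaseChatModel (or an
# AsyncOpenAI client) can back the adapter through llm_api_client.
adapter = ChatOpenAIAdapter()
adapter.llm_api_client = ChatOpenAI(model="gpt-3.5-turbo")

# Role-specific model names left unset fall back to llmmodel_default via
# AbstractChatModelProvider.__getattribute__.
adapter.llmmodel_default = "gpt-3.5-turbo"

manager = BasePromptManager(
    config=LLMConfig(
        default=adapter,
        cheap=adapter,
        long_context=adapter,
        code_expert=adapter,
    )
)

Injecting the client instead of constructing it inside chat(), as the pre-refactor code did, allows a fake client to be swapped in for tests, which matches the stated goal of the series.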