diff --git a/python/packages/ai/teams/ai/data_sources/data_source.py b/python/packages/ai/teams/ai/data_sources/data_source.py
index 8937f50ef..f54590ab7 100644
--- a/python/packages/ai/teams/ai/data_sources/data_source.py
+++ b/python/packages/ai/teams/ai/data_sources/data_source.py
@@ -17,8 +17,10 @@ class DataSource(ABC):
     A data source that can be used to render text that's added to a prompt.
     """
 
-    name: str
-    "Name of the data source."
+    @property
+    @abstractmethod
+    def name(self) -> str:
+        "Name of the data source."
 
     @abstractmethod
     async def render_data(
diff --git a/python/packages/ai/teams/ai/data_sources/text_data_source.py b/python/packages/ai/teams/ai/data_sources/text_data_source.py
index b70b3a62f..afd9ff77a 100644
--- a/python/packages/ai/teams/ai/data_sources/text_data_source.py
+++ b/python/packages/ai/teams/ai/data_sources/text_data_source.py
@@ -38,7 +38,7 @@ def __init__(self, name: str, text: str) -> None:
         self._text = text
 
     @property
-    def get_name(self) -> str:
+    def name(self) -> str:
         """
         Name of the data source.
         """
diff --git a/python/packages/ai/teams/ai/promptsv2/__init__.py b/python/packages/ai/teams/ai/promptsv2/__init__.py
index bf3e4f3c0..48d66d29e 100644
--- a/python/packages/ai/teams/ai/promptsv2/__init__.py
+++ b/python/packages/ai/teams/ai/promptsv2/__init__.py
@@ -12,8 +12,13 @@
 from .group_section import GroupSection
 from .layout_engine import LayoutEngine
 from .message import ImageContentPart, ImageUrl, Message, TextContentPart
-from .prompt_section import PromptFunctions, PromptSection
+from .prompt import Prompt
+from .prompt_functions import PromptFunction, PromptFunctions
+from .prompt_manager import PromptManager
+from .prompt_manager_options import PromptManagerOptions
+from .prompt_section import PromptSection
 from .prompt_section_base import PromptSectionBase
+from .prompt_template import PromptTemplate
 from .rendered_prompt_section import RenderedPromptSection
 from .system_message import SystemMessage
 from .template_section import TemplateSection
diff --git a/python/packages/ai/teams/ai/promptsv2/completion_config.py b/python/packages/ai/teams/ai/promptsv2/completion_config.py
index d3296410f..a74f131aa 100644
--- a/python/packages/ai/teams/ai/promptsv2/completion_config.py
+++ b/python/packages/ai/teams/ai/promptsv2/completion_config.py
@@ -69,3 +69,20 @@ class CompletionConfig:
     stop_sequences: Optional[List[str]] = None
     temperature: float = 0
     top_p: float = 0
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "CompletionConfig":
+        return cls(
+            completion_type=data.get("completion_type"),
+            frequency_penalty=data.get("frequency_penalty", 0),
+            include_history=data.get("include_history", True),
+            include_input=data.get("include_input", True),
+            include_images=data.get("include_images", False),
+            max_tokens=data.get("max_tokens", 150),
+            max_input_tokens=data.get("max_input_tokens", 2048),
+            model=data.get("model"),
+            presence_penalty=data.get("presence_penalty", 0),
+            stop_sequences=data.get("stop_sequences"),
+            temperature=data.get("temperature", 0),
+            top_p=data.get("top_p", 0),
+        )
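Reviewer note: `CompletionConfig.from_dict` applies the same defaults as the dataclass fields, so a sparse config dict loads cleanly. A minimal sketch of the intended call shape (the dict literal is illustrative, mirroring the `completion` block of a `config.json`):

    from teams.ai.promptsv2.completion_config import CompletionConfig

    # Keys absent from the dict fall back to the defaults in from_dict,
    # e.g. max_tokens=150 and include_history=True.
    config = CompletionConfig.from_dict(
        {"model": "gpt-3.5-turbo", "temperature": 0.9, "stop_sequences": ["Human:", "AI:"]}
    )
    assert config.max_tokens == 150
    assert config.model == "gpt-3.5-turbo"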
+""" + +import json +import os +from copy import deepcopy +from pathlib import Path +from typing import Any, Dict, List + +from botbuilder.core import TurnContext + +from ...app_error import ApplicationError +from ...state import Memory +from ..data_sources import DataSource +from ..tokenizers import Tokenizer +from .conversation_history import ConversationHistory +from .group_section import GroupSection +from .prompt import Prompt +from .prompt_functions import PromptFunction, PromptFunctions +from .prompt_manager_options import PromptManagerOptions +from .prompt_section import PromptSection +from .prompt_template import PromptTemplate +from .prompt_template_config import PromptTemplateConfig +from .template_section import TemplateSection +from .user_input_message import UserInputMessage +from .user_message import UserMessage + + +class PromptManager(PromptFunctions): + _options: PromptManagerOptions + _data_sources: Dict[str, DataSource] + _functions: Dict[str, PromptFunction] + _prompts: Dict[str, PromptTemplate] + + def __init__(self, options: PromptManagerOptions): + """ + Creates a new 'PromptManager' instance. + + Args: + options (PromptManagerOptions): Options used to configure the prompt manager. + """ + self._options = options + self._data_sources = {} + self._functions = {} + self._prompts = {} + + @property + def options(self) -> PromptManagerOptions: + """ + Gets the configured prompt manager options. + """ + return self._options + + def add_data_source(self, data_source: DataSource) -> "PromptManager": + """ + Registers a new data source with the prompt manager. + + Args: + data_source (DataSource): Data source to add. + + Returns: + PromptManager: The prompt manager for chaining. + + Raises: + ApplicationError: If a data source with the same name already exists. + """ + if data_source.name in self._data_sources: + raise ApplicationError(f"DataSource '{data_source.name}' already exists.") + self._data_sources[data_source.name] = data_source + return self + + def get_data_source(self, name: str) -> DataSource: + """ + Looks up a data source by name. + + Args: + name (str): Name of the data source to lookup. + + Returns: + DataSource: The data source. + + Raises: + ApplicationError: If the data source is not found. + """ + if name not in self._data_sources: + raise ApplicationError(f"DataSource '{name}' not found.") + return self._data_sources[name] + + def has_data_source(self, name: str) -> bool: + """ + Checks for the existence of a named data source. + + Args: + name (str): Name of the data source to lookup. + + Returns: + bool: True if the data source exists, False otherwise. + """ + return name in self._data_sources + + def add_function(self, name: str, function: PromptFunction) -> "PromptManager": + """ + Registers a new prompt template function with the prompt manager. + + Args: + name (str): Name of the function to add. + fn (PromptFunction): Function to add. + + Returns: + PromptManager: The prompt manager for chaining. + + Raises: + ApplicationError: If a function with the same name already exists. + """ + if name in self._functions: + raise ApplicationError(f"Function '{name}' already exists.") + self._functions[name] = function + return self + + def get_function(self, name: str) -> PromptFunction: + """ + Looks up a prompt template function by name. + + Args: + name (str): Name of the function to lookup. + + Returns: + PromptFunction: The function. + + Raises: + ApplicationError: If the function is not found. 
+        """
+        if name not in self._functions:
+            raise ApplicationError(f"Function '{name}' not found.")
+        return self._functions[name]
+
+    def has_function(self, name: str) -> bool:
+        """
+        Checks for the existence of a named prompt template function.
+
+        Args:
+            name (str): Name of the function to lookup.
+
+        Returns:
+            bool: True if the function exists, False otherwise.
+        """
+        return name in self._functions
+
+    async def invoke_function(
+        self, name: str, context: TurnContext, memory: Memory, tokenizer: Tokenizer, args: List[str]
+    ) -> Any:
+        """
+        Invokes a prompt template function by name.
+
+        Args:
+            name (str): Name of the function to invoke.
+            context (TurnContext): Turn context for the current turn of conversation with the user.
+            memory (Memory): An interface for accessing state values.
+            tokenizer (Tokenizer): Tokenizer to use when rendering the prompt.
+            args (List[str]): Arguments to pass to the function.
+
+        Returns:
+            Any: Value returned by the function.
+        """
+        function = self.get_function(name)
+        return await function(context, memory, self, tokenizer, args)
+
+    def add_prompt(self, prompt: PromptTemplate) -> "PromptManager":
+        """
+        Registers a new prompt template with the prompt manager.
+
+        Args:
+            prompt (PromptTemplate): Prompt template to add.
+
+        Returns:
+            PromptManager: The prompt manager for chaining.
+
+        Raises:
+            ApplicationError: If a prompt with the same name already exists.
+        """
+        if prompt.name in self._prompts:
+            raise ApplicationError(
+                (
+                    "The PromptManager.add_prompt() method was called with a "
+                    f"previously registered prompt named '{prompt.name}'."
+                )
+            )
+
+        # Clone and cache prompt
+        self._prompts[prompt.name] = deepcopy(prompt)
+        return self
+
+    async def get_prompt(self, name: str) -> PromptTemplate:
+        """
+        Loads a named prompt template from the filesystem.
+
+        The template will be pre-parsed and cached for use when the template is rendered by name.
+
+        Any augmentations will also be added to the template.
+
+        Args:
+            name (str): Name of the prompt to load.
+
+        Returns:
+            PromptTemplate: The loaded and parsed prompt template.
+
+        Raises:
+            ApplicationError: If the prompt is not found or there is an error loading it.
+        """
+        if name not in self._prompts:
+            template_name = name
+
+            # Load template from disk
+            folder = os.path.join(self._options.prompts_folder, name)
+            config_file = os.path.join(folder, "config.json")
+            prompt_file = os.path.join(folder, "skprompt.txt")
+
+            # Load prompt config
+            try:
+                with open(config_file, "r", encoding="utf-8") as file:
+                    template_config = PromptTemplateConfig.from_dict(json.load(file))
+            except Exception as e:
+                raise ApplicationError(
+                    (
+                        "PromptManager.get_prompt(): an error occurred while loading "
+                        f"'{config_file}'. The file is either invalid or missing."
+                    )
+                ) from e
+
+            # Load prompt text
+            sections: List[PromptSection] = []
+            try:
+                with open(prompt_file, "r", encoding="utf-8") as file:
+                    prompt = file.read()
+                    sections.append(TemplateSection(prompt, self._options.role))
+            except Exception as e:
+                raise ApplicationError(
+                    (
+                        "PromptManager.get_prompt(): an error occurred while loading "
+                        f"'{prompt_file}'. The file is either invalid or missing."
+                    )
+                ) from e
+
+            # Migrate the template's config as needed
+            self._update_config(template_config)
+
+            # Group everything into a system message
+            sections = [GroupSection(sections, "system")]
+
+            # Include conversation history
+            # - The ConversationHistory section will use the remaining tokens from
+            #   max_input_tokens.
+            if template_config.completion.include_history:
+                sections.append(
+                    ConversationHistory(
+                        f"conversation.{template_name}_history",
+                        self._options.max_conversation_history_tokens,
+                    )
+                )
+
+            # Include user input
+            if template_config.completion.include_images:
+                sections.append(UserInputMessage(self._options.max_input_tokens))
+            elif template_config.completion.include_input:
+                sections.append(UserMessage("{{$temp.input}}", self._options.max_input_tokens))
+
+            template = PromptTemplate(template_name, Prompt(sections), template_config)
+
+            # Cache loaded template
+            self._prompts[name] = template
+
+        return self._prompts[name]
+
+    def has_prompt(self, name: str) -> bool:
+        """
+        Checks for the existence of a named prompt.
+
+        Args:
+            name (str): Name of the prompt to check.
+
+        Returns:
+            bool: True if the prompt exists, False otherwise.
+        """
+        if name not in self._prompts:
+            folder = os.path.join(self._options.prompts_folder, name)
+            prompt_file = os.path.join(folder, "skprompt.txt")
+
+            return Path(prompt_file).exists()
+        return True
+
+    def _update_config(self, template_config: PromptTemplateConfig):
+        # Migrate old schema
+        if template_config.schema == 1:
+            template_config.schema = 1.1
+            if (
+                template_config.default_backends is not None
+                and len(template_config.default_backends) > 0
+            ):
+                template_config.completion.model = template_config.default_backends[0]
diff --git a/python/packages/ai/teams/ai/promptsv2/prompt_manager_options.py b/python/packages/ai/teams/ai/promptsv2/prompt_manager_options.py
new file mode 100644
index 000000000..093b38888
--- /dev/null
+++ b/python/packages/ai/teams/ai/promptsv2/prompt_manager_options.py
@@ -0,0 +1,45 @@
+"""
+Copyright (c) Microsoft Corporation. All rights reserved.
+Licensed under the MIT License.
+"""
+from dataclasses import dataclass
+
+
+@dataclass
+class PromptManagerOptions:
+    """
+    Options for the PromptManager.
+    """
+
+    prompts_folder: str
+    """Path to the filesystem folder containing all the application's prompts."""
+
+    role: str = "system"
+    """
+    Optional. Message role to use for loaded prompts.
+    Defaults to 'system'.
+    """
+
+    max_conversation_history_tokens: float = 1.0
+    """
+    Optional. Maximum number of tokens of conversation history to include in prompts.
+    The default is to let conversation history consume the remainder of the prompt's
+    `max_input_tokens` budget. Setting this to a value greater than 1 will override that,
+    and all prompts will use a fixed token budget.
+    """
+
+    max_history_messages: int = 10
+    """
+    Optional. Maximum number of messages to use when rendering conversation_history.
+    This controls the automatic pruning of the conversation history that's done by the
+    planner's LLMClient instance. This helps keep your memory from getting too big and
+    defaults to a value of `10` (or 5 turns).
+    """
+
+    max_input_tokens: int = -1
+    """
+    Optional. Maximum number of tokens of user input to include in prompts.
+    This defaults to unlimited but can be set to a value greater than `1` to limit the
+    length of user input included in prompts. For example, if set to `100`, any user
+    input over 100 tokens in length will be truncated.
+    """
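Reviewer note: wiring the pieces above together, prompt loading is driven entirely by these options. A hedged end-to-end sketch (the `my_prompt` folder name is invented; `get_prompt` expects `<prompts_folder>/my_prompt/config.json` and `skprompt.txt` to exist on disk, relative to the working directory):

    import asyncio

    from teams.ai.data_sources import TextDataSource
    from teams.ai.promptsv2 import PromptManager, PromptManagerOptions

    manager = PromptManager(PromptManagerOptions(prompts_folder="prompts"))

    # Registration methods return the manager, so calls chain.
    manager.add_data_source(TextDataSource("docs", "some grounding text"))

    async def main():
        # Parses prompts/my_prompt/{config.json,skprompt.txt} and caches the
        # result; later calls with the same name return the cached template.
        template = await manager.get_prompt("my_prompt")
        print(template.name, template.config.completion.model)

    asyncio.run(main())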
diff --git a/python/packages/ai/teams/ai/promptsv2/prompt_template_config.py b/python/packages/ai/teams/ai/promptsv2/prompt_template_config.py
index c617bdcd5..a23a9a8f5 100644
--- a/python/packages/ai/teams/ai/promptsv2/prompt_template_config.py
+++ b/python/packages/ai/teams/ai/promptsv2/prompt_template_config.py
@@ -36,3 +36,13 @@ class PromptTemplateConfig:
     type: Literal["completion"] = "completion"
     description: Optional[str] = None
     default_backends: Optional[List[str]] = None
+
+    @classmethod
+    def from_dict(cls, data: dict) -> "PromptTemplateConfig":
+        return cls(
+            schema=data.get("schema", 1.0),
+            completion=CompletionConfig.from_dict(data["completion"]),
+            type=data.get("type", "completion"),
+            description=data.get("description"),
+            default_backends=data.get("default_backends"),
+        )
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/happy_path/config.json b/python/packages/ai/tests/ai/prompts/test_assets/happy_path/config.json
new file mode 100644
index 000000000..ee53f805e
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/happy_path/config.json
@@ -0,0 +1,24 @@
+{
+    "schema": 1.1,
+    "description": "test config",
+    "type": "completion",
+    "completion": {
+        "model": "gpt-3.5-turbo",
+        "completion_type": "chat",
+        "include_history": true,
+        "include_input": true,
+        "max_input_tokens": 2800,
+        "max_tokens": 1000,
+        "temperature": 0.9,
+        "top_p": 0.0,
+        "presence_penalty": 0.6,
+        "frequency_penalty": 0.0,
+        "stop_sequences": []
+    },
+    "augmentation": {
+        "augmentation_type": "none",
+        "data_sources": {
+            "teams-ai": 1200
+        }
+    }
+}
\ No newline at end of file
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/happy_path/skprompt.txt b/python/packages/ai/tests/ai/prompts/test_assets/happy_path/skprompt.txt
new file mode 100644
index 000000000..9f250e94c
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/happy_path/skprompt.txt
@@ -0,0 +1 @@
+test prompt
\ No newline at end of file
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/include_images/config.json b/python/packages/ai/tests/ai/prompts/test_assets/include_images/config.json
new file mode 100644
index 000000000..5b489e9c2
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/include_images/config.json
@@ -0,0 +1,25 @@
+{
+    "schema": 1.1,
+    "description": "test config",
+    "type": "completion",
+    "completion": {
+        "model": "gpt-3.5-turbo",
+        "completion_type": "chat",
+        "include_history": true,
+        "include_input": true,
+        "include_images": true,
+        "max_input_tokens": 2800,
+        "max_tokens": 1000,
+        "temperature": 0.9,
+        "top_p": 0.0,
+        "presence_penalty": 0.6,
+        "frequency_penalty": 0.0,
+        "stop_sequences": []
+    },
+    "augmentation": {
+        "augmentation_type": "none",
+        "data_sources": {
+            "teams-ai": 1200
+        }
+    }
+}
\ No newline at end of file
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/include_images/skprompt.txt b/python/packages/ai/tests/ai/prompts/test_assets/include_images/skprompt.txt
new file mode 100644
index 000000000..9f250e94c
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/include_images/skprompt.txt
@@ -0,0 +1 @@
+test prompt
\ No newline at end of file
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/migrate_old_schema/config.json b/python/packages/ai/tests/ai/prompts/test_assets/migrate_old_schema/config.json
new file mode 100644
index 000000000..f4ffdac8a
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/migrate_old_schema/config.json
@@ -0,0 +1,19 @@
+{
+    "schema": 1,
+    "description": "test config",
+    "type": "completion",
+    "completion": {
+        "max_tokens": 1000,
+        "temperature": 0.9,
+        "top_p": 0.0,
+        "presence_penalty": 0.6,
+        "frequency_penalty": 0.0,
+        "stop_sequences": [
+            "Human:",
+            "AI:"
+        ]
+    },
+    "default_backends": [
+        "gpt-3.5-turbo"
+    ]
+}
\ No newline at end of file
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/migrate_old_schema/skprompt.txt b/python/packages/ai/tests/ai/prompts/test_assets/migrate_old_schema/skprompt.txt
new file mode 100644
index 000000000..9f250e94c
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/migrate_old_schema/skprompt.txt
@@ -0,0 +1 @@
+test prompt
\ No newline at end of file
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/no_config/skprompt.txt b/python/packages/ai/tests/ai/prompts/test_assets/no_config/skprompt.txt
new file mode 100644
index 000000000..9f250e94c
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/no_config/skprompt.txt
@@ -0,0 +1 @@
+test prompt
\ No newline at end of file
diff --git a/python/packages/ai/tests/ai/prompts/test_assets/no_prompt/config.json b/python/packages/ai/tests/ai/prompts/test_assets/no_prompt/config.json
new file mode 100644
index 000000000..ee53f805e
--- /dev/null
+++ b/python/packages/ai/tests/ai/prompts/test_assets/no_prompt/config.json
@@ -0,0 +1,24 @@
+{
+    "schema": 1.1,
+    "description": "test config",
+    "type": "completion",
+    "completion": {
+        "model": "gpt-3.5-turbo",
+        "completion_type": "chat",
+        "include_history": true,
+        "include_input": true,
+        "max_input_tokens": 2800,
+        "max_tokens": 1000,
+        "temperature": 0.9,
+        "top_p": 0.0,
+        "presence_penalty": 0.6,
+        "frequency_penalty": 0.0,
+        "stop_sequences": []
+    },
+    "augmentation": {
+        "augmentation_type": "none",
+        "data_sources": {
+            "teams-ai": 1200
+        }
+    }
+}
\ No newline at end of file
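Reviewer note: the `migrate_old_schema` asset above exists to exercise `_update_config`: a `"schema": 1` config with `default_backends` should load as schema `1.1`, with the first backend promoted to `completion.model`. A sketch of that expectation (the path assumes the tests' working directory, i.e. the package root):

    import json

    from teams.ai.promptsv2.prompt_template_config import PromptTemplateConfig

    with open(
        "tests/ai/prompts/test_assets/migrate_old_schema/config.json", encoding="utf-8"
    ) as f:
        config = PromptTemplateConfig.from_dict(json.load(f))

    assert config.schema == 1  # still the old schema; no model set yet
    assert config.completion.model is None
    # PromptManager._update_config(config) then migrates it:
    #   config.schema -> 1.1
    #   config.completion.model -> "gpt-3.5-turbo"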
+""" + +import os +from unittest import IsolatedAsyncioTestCase +from unittest.mock import MagicMock + +from teams.ai.data_sources import TextDataSource +from teams.ai.promptsv2 import ( + ConversationHistory, + GroupSection, + Prompt, + PromptFunction, + PromptManager, + PromptManagerOptions, + PromptTemplate, + TemplateSection, + UserInputMessage, + UserMessage, +) +from teams.app_error import ApplicationError + +TEST_ASSERTS_FOLDER: str = os.path.join("tests", "ai", "prompts", "test_assets") + + +class TestPromptManager(IsolatedAsyncioTestCase): + def setUp(self): + self.options = PromptManagerOptions(TEST_ASSERTS_FOLDER) + self.prompt_manager = PromptManager(self.options) + + def test_add_and_get_data_source(self): + data_source = TextDataSource("test_name", "test_text") + self.prompt_manager.add_data_source(data_source) + self.assertTrue(self.prompt_manager.has_data_source("test_name")) + self.assertEqual(self.prompt_manager.get_data_source("test_name"), data_source) + + def test_add_data_source_duplicate(self): + data_source = TextDataSource("test_name", "test_text") + self.prompt_manager.add_data_source(data_source) + with self.assertRaises(ApplicationError) as context: + self.prompt_manager.add_data_source(data_source) + self.assertEqual(str(context.exception), "DataSource 'test_name' already exists.") + + def test_get_data_source_not_found(self): + with self.assertRaises(ApplicationError) as context: + self.prompt_manager.get_data_source("not_exist") + self.assertEqual(str(context.exception), "DataSource 'not_exist' not found.") + + def test_add_and_get_function(self): + function = MagicMock(spec=PromptFunction) + self.prompt_manager.add_function("test_name", function) + self.assertTrue(self.prompt_manager.has_function("test_name")) + self.assertEqual(self.prompt_manager.get_function("test_name"), function) + + def test_add_function_duplicate(self): + function = MagicMock(spec=PromptFunction) + self.prompt_manager.add_function("test_name", function) + with self.assertRaises(ApplicationError) as context: + self.prompt_manager.add_function("test_name", function) + self.assertEqual(str(context.exception), "Function 'test_name' already exists.") + + def test_get_function_not_found(self): + with self.assertRaises(ApplicationError) as context: + self.prompt_manager.get_function("not_exist") + self.assertEqual(str(context.exception), "Function 'not_exist' not found.") + + async def test_add_and_get_prompt(self): + prompt = MagicMock(spec=PromptTemplate) + prompt.name = "test" + self.prompt_manager.add_prompt(prompt) + self.assertTrue(self.prompt_manager.has_prompt("test")) + + def test_add_prompt_duplicate(self): + prompt = MagicMock(spec=PromptTemplate) + prompt.name = "test" + self.prompt_manager.add_prompt(prompt) + with self.assertRaises(ApplicationError) as context: + self.prompt_manager.add_prompt(prompt) + self.assertEqual( + str(context.exception), + ( + "The PromptManager.add_prompt() method was called with a " + "previously registered prompt named 'test'." 
+            ),
+        )
+
+    def test_has_prompt_from_file(self):
+        self.assertTrue(self.prompt_manager.has_prompt("happy_path"))
+
+    def test_has_prompt_not_found(self):
+        self.assertFalse(self.prompt_manager.has_prompt("not_found"))
+
+    async def test_get_prompt_from_file_no_config(self):
+        with self.assertRaises(ApplicationError) as context:
+            await self.prompt_manager.get_prompt("no_config")
+        self.assertEqual(
+            str(context.exception),
+            "PromptManager.get_prompt(): an error occurred while loading '"
+            + os.path.join(TEST_ASSETS_FOLDER, "no_config", "config.json")
+            + "'. The file is either invalid or missing.",
+        )
+
+    async def test_get_prompt_from_file_no_prompt(self):
+        with self.assertRaises(ApplicationError) as context:
+            await self.prompt_manager.get_prompt("no_prompt")
+        self.assertEqual(
+            str(context.exception),
+            "PromptManager.get_prompt(): an error occurred while loading '"
+            + os.path.join(TEST_ASSETS_FOLDER, "no_prompt", "skprompt.txt")
+            + "'. The file is either invalid or missing.",
+        )
+
+    async def test_get_prompt_from_file(self):
+        prompt = await self.prompt_manager.get_prompt("happy_path")
+
+        self.assertEqual(prompt.name, "happy_path")
+        assert isinstance(prompt.prompt, Prompt)
+        self.assertEqual(len(prompt.prompt.sections), 3)
+        assert isinstance(prompt.prompt.sections[0], GroupSection)
+        self.assertEqual(len(prompt.prompt.sections[0].sections), 1)
+        assert isinstance(prompt.prompt.sections[0].sections[0], TemplateSection)
+        self.assertEqual(prompt.prompt.sections[0].sections[0].template, "test prompt")
+        assert isinstance(prompt.prompt.sections[1], ConversationHistory)
+        self.assertEqual(prompt.prompt.sections[1].variable, "conversation.happy_path_history")
+        self.assertEqual(
+            prompt.prompt.sections[1].tokens, self.options.max_conversation_history_tokens
+        )
+        assert isinstance(prompt.prompt.sections[2], UserMessage)
+        self.assertEqual(prompt.prompt.sections[2].template, "{{$temp.input}}")
+        self.assertEqual(prompt.prompt.sections[2].tokens, self.options.max_input_tokens)
+
+        self.assertEqual(prompt.config.schema, 1.1)
+        self.assertEqual(prompt.config.description, "test config")
+        self.assertEqual(prompt.config.type, "completion")
+        self.assertEqual(prompt.config.completion.model, "gpt-3.5-turbo")
+        self.assertEqual(prompt.config.completion.completion_type, "chat")
+        self.assertEqual(prompt.config.completion.include_history, True)
+        self.assertEqual(prompt.config.completion.include_input, True)
+        self.assertEqual(prompt.config.completion.max_input_tokens, 2800)
+        self.assertEqual(prompt.config.completion.max_tokens, 1000)
+        self.assertEqual(prompt.config.completion.temperature, 0.9)
+        self.assertEqual(prompt.config.completion.top_p, 0.0)
+        self.assertEqual(prompt.config.completion.presence_penalty, 0.6)
+        self.assertEqual(prompt.config.completion.frequency_penalty, 0.0)
+        self.assertEqual(prompt.config.completion.stop_sequences, [])
+
+    async def test_get_prompt_from_file_include_images(self):
+        prompt = await self.prompt_manager.get_prompt("include_images")
+
+        self.assertEqual(prompt.name, "include_images")
+        assert isinstance(prompt.prompt, Prompt)
+        self.assertEqual(len(prompt.prompt.sections), 3)
+        assert isinstance(prompt.prompt.sections[0], GroupSection)
+        self.assertEqual(len(prompt.prompt.sections[0].sections), 1)
+        assert isinstance(prompt.prompt.sections[0].sections[0], TemplateSection)
+        self.assertEqual(prompt.prompt.sections[0].sections[0].template, "test prompt")
+        assert isinstance(prompt.prompt.sections[1], ConversationHistory)
+        self.assertEqual(prompt.prompt.sections[1].variable, "conversation.include_images_history")
+        self.assertEqual(
+            prompt.prompt.sections[1].tokens, self.options.max_conversation_history_tokens
+        )
+        assert isinstance(prompt.prompt.sections[2], UserInputMessage)
+        self.assertEqual(prompt.prompt.sections[2].tokens, self.options.max_input_tokens)
+
+    async def test_get_prompt_from_file_migrate_old_schema(self):
+        prompt = await self.prompt_manager.get_prompt("migrate_old_schema")
+
+        self.assertEqual(prompt.config.schema, 1.1)
+        self.assertEqual(prompt.config.completion.model, "gpt-3.5-turbo")
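Reviewer note: `invoke_function` has no direct coverage in this PR. A hedged sketch of a possible follow-up test (the `echo` function and its arguments are invented; the call order `(context, memory, functions, tokenizer, args)` matches what `invoke_function` passes):

    from unittest import IsolatedAsyncioTestCase
    from unittest.mock import MagicMock

    from teams.ai.promptsv2 import PromptManager, PromptManagerOptions


    class TestInvokeFunction(IsolatedAsyncioTestCase):
        async def test_invoke_function(self):
            manager = PromptManager(PromptManagerOptions("prompts"))

            async def echo(context, memory, functions, tokenizer, args):
                # `functions` is the prompt manager itself.
                return args[0]

            manager.add_function("echo", echo)
            result = await manager.invoke_function(
                "echo", MagicMock(), MagicMock(), MagicMock(), ["hello"]
            )
            self.assertEqual(result, "hello")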