Commit 57fe0e5

completed augmentations implementation
1 parent 6595add · commit 57fe0e5

14 files changed: +1122 -12 lines

python/packages/ai/teams/ai/ai.py (+1 -1)
@@ -288,7 +288,7 @@ async def _on_do_command(
        state: StateT,
    ) -> bool:
        action = self._actions.get(context.data.action)
-       ctx = ActionTurnContext(context.data.action, context.data.entities, context)
+       ctx = ActionTurnContext(context.data.action, context.data.parameters, context)

        if not action:
            return await self._on_unknown_action(ctx, state)

python/packages/ai/teams/ai/augmentations/__init__.py (new file, +10 lines; path inferred from the imports)
@@ -0,0 +1,10 @@
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""

from .action_augmentation_section import ActionAugmentationSection
from .augmentation import Augmentation
from .default_augmentation import DefaultAugmentation
from .monologue_augmentation import MonologueAugmentation
from .sequence_augmentation import SequenceAugmentation

python/packages/ai/teams/ai/augmentations/action_augmentation_section.py (new file, +116 lines; path inferred from the imports)
@@ -0,0 +1,116 @@
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""

from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union

import yaml
from botbuilder.core import TurnContext

from teams.ai.modelsv2.chat_completion_action import ChatCompletionAction
from teams.ai.promptsv2.message import Message
from teams.ai.promptsv2.prompt_functions import PromptFunctions
from teams.ai.promptsv2.prompt_section_base import PromptSectionBase
from teams.ai.promptsv2.rendered_prompt_section import RenderedPromptSection
from teams.ai.tokenizers.tokenizer import Tokenizer
from teams.state.memory import Memory


@dataclass
class ActionValue:
    description: Optional[str] = None
    parameters: Optional[Union[Dict[str, Any], Dict[str, Dict[str, Any]]]] = None


@dataclass
class ActionList:
    actions: Dict[str, ActionValue]


class ActionAugmentationSection(PromptSectionBase):
    """
    A prompt section that renders a list of actions to the prompt.
    """

    _text: str
    _token_list: Optional[List[int]] = None
    _actions: Dict[str, ChatCompletionAction] = {}

    @property
    def actions(self) -> Dict[str, ChatCompletionAction]:
        """
        Map of action names to actions.
        """
        return self._actions

    def __init__(self, actions: List[ChatCompletionAction], call_to_action: str) -> None:
        """
        Creates a new `ActionAugmentationSection` instance.

        Args:
            actions (List[ChatCompletionAction]): List of actions to render.
            call_to_action (str): Text to display after the list of actions.

        """
        super().__init__(-1, True, "\n\n")

        # Convert actions to an ActionList
        action_list: ActionList = {"actions": {}}

        for action in actions:
            self._actions[action.name] = action
            action_list["actions"][action.name] = {}
            if action.description:
                action_list["actions"][action.name]["description"] = action.description
            if action.parameters:
                params = action.parameters
                action_list["actions"][action.name]["parameters"] = (
                    params.get("properties")
                    if params.get("additional_properties") is None
                    else params
                )

        # Build augmentation text
        self._text = f"{yaml.dump(action_list)}\n\n{call_to_action}"

    async def render_as_messages(
        self,
        context: TurnContext,
        memory: Memory,
        functions: PromptFunctions,
        tokenizer: Tokenizer,
        max_tokens: int,
    ) -> RenderedPromptSection[List[Message[str]]]:
        """
        Renders the prompt section as a list of `Message` objects.

        Args:
            context (TurnContext): Context for the current turn of conversation.
            memory (Memory): Interface for accessing state variables.
            functions (PromptFunctions): Functions for rendering prompts.
            tokenizer (Tokenizer): Tokenizer to use for encoding/decoding text.
            max_tokens (int): Maximum number of tokens allowed for the rendered prompt.

        Returns:
            RenderedPromptSection[List[Message[str]]]: The rendered prompt section.

        """
        # Tokenize on first use
        if not self._token_list:
            self._token_list = tokenizer.encode(self._text)

        # Check for max tokens
        if len(self._token_list) > max_tokens:
            trimmed = self._token_list[0:max_tokens]
            return RenderedPromptSection[List[Message[str]]](
                output=[Message[str](role="system", content=tokenizer.decode(trimmed))],
                length=len(trimmed),
                too_long=True,
            )
        return RenderedPromptSection[List[Message[str]]](
            output=[Message[str](role="system", content=self._text)],
            length=len(self._token_list),
            too_long=False,
        )
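
A quick, hedged usage sketch of the section above (not part of this commit): it assumes `ChatCompletionAction` accepts `name`, `description`, and `parameters` as keyword arguments, and that the import paths mirror the package layout introduced here.

from teams.ai.augmentations.action_augmentation_section import ActionAugmentationSection
from teams.ai.modelsv2.chat_completion_action import ChatCompletionAction

# Hypothetical action definition, for illustration only.
lights_on = ChatCompletionAction(
    name="LightsOn",
    description="Turns the lights on",
    parameters={
        "type": "object",
        "properties": {"room": {"type": "string"}},
    },
)

section = ActionAugmentationSection(
    actions=[lights_on],
    call_to_action="Choose one of the actions above and respond with its name.",
)

# The section's text is the YAML dump of the action list followed by the
# call to action, roughly:
#
# actions:
#   LightsOn:
#     description: Turns the lights on
#     parameters:
#       room:
#         type: string
#
# Choose one of the actions above and respond with its name.

Rendering into a prompt goes through `render_as_messages`, which encodes the text once with the tokenizer and truncates it (reporting `too_long=True`) if it exceeds `max_tokens`.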

python/packages/ai/teams/ai/augmentations/augmentation.py (new file, +50 lines; path inferred from the imports)
@@ -0,0 +1,50 @@
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""

from abc import ABC, abstractmethod
from typing import Generic, TypeVar, Union

from botbuilder.core import TurnContext

from teams.ai.modelsv2.prompt_response import PromptResponse
from teams.ai.planner import Plan
from teams.ai.promptsv2.prompt_section import PromptSection
from teams.ai.validators.prompt_response_validator import PromptResponseValidator
from teams.state.memory import Memory

ValueT = TypeVar("ValueT")
"Type of message content returned for a 'success' response."


class Augmentation(PromptResponseValidator[ValueT], ABC, Generic[ValueT]):
    """
    An augmentation is a component that can be added to a prompt template to add additional
    functionality to the prompt.
    """

    @abstractmethod
    def create_prompt_section(self) -> Union[PromptSection, None]:
        """
        Creates an optional prompt section for the augmentation.

        Returns:
            Union[PromptSection, None]: The prompt section.
        """

    @abstractmethod
    async def create_plan_from_response(
        self, turn_context: TurnContext, memory: Memory, response: PromptResponse[ValueT]
    ) -> Plan:
        """
        Creates a plan given validated response value.

        Args:
            turn_context (TurnContext): Context for the current turn of conversation.
            memory (Memory): An interface for accessing state variables.
            response (PromptResponse[ValueT]): Validated, transformed response for the prompt.

        Returns:
            Plan: The created plan.
        """

python/packages/ai/teams/ai/augmentations/default_augmentation.py (new file, +72 lines; path inferred from the imports)
@@ -0,0 +1,72 @@
"""
Copyright (c) Microsoft Corporation. All rights reserved.
Licensed under the MIT License.
"""

from typing import Union

from botbuilder.core import TurnContext

from teams.ai.augmentations.augmentation import Augmentation
from teams.ai.modelsv2.prompt_response import PromptResponse
from teams.ai.planner import Plan
from teams.ai.planner.predicted_say_command import PredictedSayCommand
from teams.ai.promptsv2.prompt_section import PromptSection
from teams.ai.tokenizers.tokenizer import Tokenizer
from teams.ai.validators.validation import Validation
from teams.state.memory import Memory


class DefaultAugmentation(Augmentation[str]):
    """
    The default 'none' augmentation.

    This augmentation does not add any additional functionality to the prompt. It always
    returns a `Plan` with a single `SAY` command containing the model's response.
    """

    def create_prompt_section(self) -> Union[PromptSection, None]:
        """
        Creates an optional prompt section for the augmentation.
        """
        return None

    async def validate_response(
        self,
        context: TurnContext,
        memory: Memory,
        tokenizer: Tokenizer,
        response: PromptResponse[str],
        remaining_attempts: int,
    ) -> Validation[str]:
        """
        Validates a response to a prompt.

        Args:
            context (TurnContext): Context for the current turn of conversation.
            memory (Memory): Interface for accessing state variables.
            tokenizer (Tokenizer): Tokenizer to use for encoding/decoding text.
            response (PromptResponse[str]): Response to validate.
            remaining_attempts (int): Number of remaining attempts to validate the response.

        Returns:
            Validation[str]: A 'Validation' object.
        """
        return Validation[str](valid=True)

    async def create_plan_from_response(
        self, turn_context: TurnContext, memory: Memory, response: PromptResponse[str]
    ) -> Plan:
        """
        Creates a plan given validated response value.

        Args:
            turn_context (TurnContext): Context for the current turn of conversation.
            memory (Memory): Interface for accessing state variables.
            response (PromptResponse[str]): The validated and transformed response for the prompt.

        Returns:
            Plan: The created plan.
        """
        say_response = response.message.content if response.message.content else ""
        return Plan(commands=[PredictedSayCommand(response=say_response)])
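
A small, hedged usage sketch (not part of this commit) of the default augmentation turning a model reply into a one-command SAY plan. It assumes `PromptResponse` accepts the model message via a `message=` keyword argument and that `Message` allows an "assistant" role; the `context`/`memory` arguments are whatever the surrounding bot turn provides (this class does not actually use them).

from botbuilder.core import TurnContext

from teams.ai.augmentations.default_augmentation import DefaultAugmentation
from teams.ai.modelsv2.prompt_response import PromptResponse
from teams.ai.planner import Plan
from teams.ai.promptsv2.message import Message
from teams.state.memory import Memory


async def say_whatever_the_model_said(context: TurnContext, memory: Memory) -> Plan:
    augmentation = DefaultAugmentation()

    # create_prompt_section() returns None: nothing extra is added to the prompt.
    assert augmentation.create_prompt_section() is None

    # Assumed constructor shape; in practice the PromptResponse comes from the model client.
    response = PromptResponse[str](
        message=Message[str](role="assistant", content="Hello there!")
    )

    # validate_response always reports valid=True for the default augmentation, and the
    # resulting plan is a single SAY command echoing the model content:
    # Plan(commands=[PredictedSayCommand(response="Hello there!")])
    return await augmentation.create_plan_from_response(context, memory, response)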
