Skip to content

Commit

Permalink
Merge branch 'main' into wanhan/flow_as_func
Browse files Browse the repository at this point in the history
  • Loading branch information
D-W- committed Nov 1, 2023
2 parents fede104 + f47660e commit c6a16a3
Show file tree
Hide file tree
Showing 23 changed files with 362 additions and 87 deletions.
3 changes: 3 additions & 0 deletions docs/reference/pfazure-command-reference.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,9 @@ Manage prompt flow runs.
| [pfazure run show-details](#pfazure-run-show-details) | Show the details of a run. |
| [pfazure run show-metrics](#pfazure-run-show-metrics) | Show run metrics. |
| [pfazure run visualize](#pfazure-run-visualize) | Visualize a run. |
| [pfazure run archive](#pfazure-run-archive) | Archive a run. |
| [pfazure run restore](#pfazure-run-restore) | Restore a run. |
| [pfazure run update](#pfazure-run-update) | Update a run. |

### pfazure run create

Expand Down
35 changes: 22 additions & 13 deletions src/promptflow/promptflow/_cli/_pf/_flow.py
Original file line number Diff line number Diff line change
Expand Up @@ -218,9 +218,7 @@ def add_parser_test_flow(subparsers):
add_param_multi_modal = lambda parser: parser.add_argument( # noqa: E731
"--multi-modal", action="store_true", help=argparse.SUPPRESS
)
add_param_ui = lambda parser: parser.add_argument( # noqa: E731
"--ui", action="store_true", help=argparse.SUPPRESS
)
add_param_ui = lambda parser: parser.add_argument("--ui", action="store_true", help=argparse.SUPPRESS) # noqa: E731
add_param_input = lambda parser: parser.add_argument("--input", type=str, help=argparse.SUPPRESS) # noqa: E731

add_params = [
Expand Down Expand Up @@ -389,22 +387,35 @@ def test_flow(args):
if args.multi_modal or args.ui:
with tempfile.TemporaryDirectory() as temp_dir:
try:
from streamlit.web import cli as st_cli
import streamlit_quill # noqa: F401
import bs4 # noqa: F401
import streamlit_quill # noqa: F401
from streamlit.web import cli as st_cli
except ImportError as ex:
raise UserErrorException(
f"Please try 'pip install promptflow[executable]' to install dependency, {ex.msg}."
)
flow = load_flow(args.flow)

script_path = [os.path.join(temp_dir, "main.py"), os.path.join(temp_dir, "utils.py"),
os.path.join(temp_dir, "logo.png")]
script_path = [
os.path.join(temp_dir, "main.py"),
os.path.join(temp_dir, "utils.py"),
os.path.join(temp_dir, "logo.png"),
]
for script in script_path:
StreamlitFileGenerator(flow_name=flow.name, flow_dag_path=flow.flow_dag_path).generate_to_file(script)

sys.argv = ["streamlit", "run", os.path.join(temp_dir, "main.py"), "--global.developmentMode=false",
"--client.toolbarMode=viewer", "--browser.gatherUsageStats=false"]
StreamlitFileGenerator(
flow_name=flow.name,
flow_dag_path=flow.flow_dag_path,
connection_provider=pf_client._ensure_connection_provider(),
).generate_to_file(script)

sys.argv = [
"streamlit",
"run",
os.path.join(temp_dir, "main.py"),
"--global.developmentMode=false",
"--client.toolbarMode=viewer",
"--browser.gatherUsageStats=false",
]
st_cli.main()
else:
if args.interactive:
Expand All @@ -414,7 +425,6 @@ def test_flow(args):
environment_variables=environment_variables,
variant=args.variant,
show_step_output=args.verbose,
config=config,
)
else:
result = pf_client.flows._test(
Expand All @@ -424,7 +434,6 @@ def test_flow(args):
variant=args.variant,
node=args.node,
allow_generator_output=False,
config=config,
stream_output=False,
)
# Dump flow/node test info
Expand Down
51 changes: 35 additions & 16 deletions src/promptflow/promptflow/_cli/_pf/_init_entry_generators.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,8 +13,9 @@
from jinja2 import Environment, Template, meta

from promptflow._sdk._constants import LOGGER_NAME
from promptflow.contracts.flow import Flow as ExecutableFlow
from promptflow._sdk.operations._flow_operations import FlowOperations
from promptflow.contracts.flow import Flow as ExecutableFlow
from promptflow.exceptions import UserErrorException

logger = logging.getLogger(LOGGER_NAME)
TEMPLATE_PATH = Path(__file__).parent.parent / "data" / "entry_flow"
Expand Down Expand Up @@ -223,43 +224,61 @@ def entry_template_keys(self):


class StreamlitFileGenerator(BaseGenerator):
def __init__(self, flow_name, flow_dag_path):
def __init__(self, flow_name, flow_dag_path, connection_provider):
self.flow_name = flow_name
self.flow_dag_path = Path(flow_dag_path)
self.connection_provider = connection_provider
self.executable = ExecutableFlow.from_yaml(
flow_file=Path(self.flow_dag_path.name), working_dir=self.flow_dag_path.parent
)
self.is_chat_flow, self.chat_history_input_name, _ = FlowOperations._is_chat_flow(self.executable)
self.is_chat_flow, self.chat_history_input_name, error_msg = FlowOperations._is_chat_flow(self.executable)
if not self.is_chat_flow:
raise UserErrorException(f"Only support chat flow in ui mode, {error_msg}.")
self._chat_input_name = next(
(flow_input for flow_input, value in self.executable.inputs.items() if value.is_chat_input), None)
self._chat_input = self.executable.inputs[self._chat_input_name]

@property
def chat_input_default_value(self):
    """Default value declared for the flow's chat input."""
    chat_input = self._chat_input
    return chat_input.default

@property
def chat_input_value_type(self):
    """Declared type of the flow's chat input."""
    chat_input = self._chat_input
    return chat_input.type

@property
def flow_inputs(self):
return {flow_input: (value.default, value.type.value) for flow_input, value in self.executable.inputs.items()
if not value.is_chat_history}
def chat_input_name(self):
return self._chat_input_name

@property
def flow_inputs_params(self):
flow_inputs_params = ["=".join([flow_input, flow_input]) for flow_input, _ in self.flow_inputs.items()]
return ",".join(flow_inputs_params)
return f"{self.chat_input_name}={self.chat_input_name}"

@property
def tpl_file(self):
return SERVE_TEMPLATE_PATH / "main.py.jinja2"
return SERVE_TEMPLATE_PATH / "flow_test_main.py.jinja2"

@property
def flow_path(self):
    """The flow DAG file path rendered as a POSIX-style string."""
    dag_path = self.flow_dag_path
    return dag_path.as_posix()

@property
def label(self):
    """Submit-button caption: "Chat" for chat flows, "Run" otherwise."""
    if self.is_chat_flow:
        return "Chat"
    return "Run"

@property
def entry_template_keys(self):
return ["flow_name", "flow_inputs", "flow_inputs_params", "flow_path", "is_chat_flow",
"chat_history_input_name", "label"]
return [
"flow_name",
"chat_input_name",
"flow_inputs_params",
"flow_path",
"is_chat_flow",
"chat_history_input_name",
"connection_provider",
"chat_input_default_value",
"chat_input_value_type",
"chat_input_name",
]

def generate_to_file(self, target):
if Path(target).name == self.tpl_file.stem:
if Path(target).name == "main.py":
super().generate_to_file(target=target)
else:
shutil.copy(SERVE_TEMPLATE_PATH / Path(target).name, target)
Expand Down
12 changes: 8 additions & 4 deletions src/promptflow/promptflow/_sdk/_pf_client.py
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ def __init__(self, **kwargs):
self._config = kwargs.get("config", None) or {}
# Lazy init to avoid azure credential requires too early
self._connections = None
self._flows = FlowOperations()
self._flows = FlowOperations(client=self)
self._tools = ToolOperations()
setup_user_agent_to_operation_context(USER_AGENT)

Expand Down Expand Up @@ -181,13 +181,17 @@ def runs(self) -> RunOperations:
"""Run operations that can manage runs."""
return self._runs

def _ensure_connection_provider(self) -> str:
if not self._connection_provider:
# Get a copy with config override instead of the config instance
self._connection_provider = Configuration(overrides=self._config).get_connection_provider()
return self._connection_provider

@property
def connections(self) -> ConnectionOperations:
"""Connection operations that can manage connections."""
if not self._connections:
if not self._connection_provider:
# Get a copy with config override instead of the config instance
self._connection_provider = Configuration(overrides=self._config).get_connection_provider()
self._ensure_connection_provider()
if self._connection_provider == ConnectionProvider.LOCAL.value:
logger.debug("Using local connection operations.")
self._connections = ConnectionOperations()
Expand Down
44 changes: 32 additions & 12 deletions src/promptflow/promptflow/_sdk/_serving/flow_invoker.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,14 +104,14 @@ def _init_executor(self):
self.executor.enable_streaming_for_llm_flow(self.streaming)
logger.info("Promptflow executor initiated successfully.")

def invoke(self, data: dict):
def _invoke(self, data: dict):
"""
Process a flow request in the runtime.
:param data: The request data dict with flow input as keys, for example: {"question": "What is ChatGPT?"}.
:type data: dict
:return: The flow output dict, for example: {"answer": "ChatGPT is a chatbot."}.
:rtype: dict
:return: The result of executor.
:rtype: ~promptflow.executor._result.LineResult
"""
logger.info(f"PromptFlow invoker received data: {data}")

Expand All @@ -121,17 +121,37 @@ def invoke(self, data: dict):
# Pass index 0 as extension require for dumped result.
# TODO: Remove this index after extension remove this requirement.
result = self.executor.exec_line(data, index=0, allow_generator_output=self.streaming())
if LINE_NUMBER_KEY in result.output:
# Remove line number from output
del result.output[LINE_NUMBER_KEY]
return result

def invoke(self, data: dict):
    """
    Process a flow request in the runtime and return the output of the executor.

    :param data: The request data dict with flow input as keys, for example: {"question": "What is ChatGPT?"}.
    :type data: dict
    :return: The flow output dict, for example: {"answer": "ChatGPT is a chatbot."}.
    :rtype: dict
    """
    line_result = self._invoke(data)
    # Multimedia objects in the output are replaced with base64 payloads so the
    # returned dict is serializable.
    resolved = self._convert_multimedia_data_to_base64(line_result)
    # Persist the raw result (when a dump path is configured) before returning.
    self._dump_invoke_result(line_result)
    print_yellow_warning(f"Result: {line_result.output}")
    return resolved

def _convert_multimedia_data_to_base64(self, invoke_result):
resolved_outputs = {
k: convert_multimedia_data_to_base64(v, with_type=True, dict_type=True) for k, v in result.output.items()
k: convert_multimedia_data_to_base64(v, with_type=True, dict_type=True)
for k, v in invoke_result.output.items()
}
return resolved_outputs

def _dump_invoke_result(self, invoke_result):
if self._dump_to:
result.output = persist_multimedia_data(
result.output, base_dir=self._dump_to, sub_dir=Path(".promptflow/output")
invoke_result.output = persist_multimedia_data(
invoke_result.output, base_dir=self._dump_to, sub_dir=Path(".promptflow/output")
)
dump_flow_result(flow_folder=self._dump_to, flow_result=result, prefix=self._dump_file_prefix)
print_yellow_warning(f"Result: {result.output}")
if LINE_NUMBER_KEY in resolved_outputs:
# Remove line number from output
del resolved_outputs[LINE_NUMBER_KEY]
return resolved_outputs
dump_flow_result(flow_folder=self._dump_to, flow_result=invoke_result, prefix=self._dump_file_prefix)
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
{# This template is added only for chat flow with single input and output. #}
import json
import os
from pathlib import Path
from PIL import Image
import streamlit as st
from streamlit_quill import st_quill

from promptflow._sdk._serving.flow_invoker import FlowInvoker

from utils import dict_iter_render_message, parse_list_from_html, parse_image_content

# Module-level FlowInvoker, created lazily on the first chat submission in run_flow().
invoker = None
{% set indent_level = 4 %}

def start():
    # NOTE(review): only clears the rendered transcript; st.session_state.history
    # (the flow-side chat history) is not reset here — confirm this is intended.
    def clear_chat() -> None:
        st.session_state.messages = []

    # Render one (role, message_items) entry inside a chat-message bubble.
    def render_message(role, message_items):
        with st.chat_message(role):
            dict_iter_render_message(message_items)

    # Initialize session state on first run, then replay the stored transcript.
    def show_conversation() -> None:
        if "messages" not in st.session_state:
            st.session_state.messages = []
            st.session_state.history = []
        if st.session_state.messages:
            for role, message_items in st.session_state.messages:
                render_message(role, message_items)


    def get_chat_history_from_session():
        if "history" in st.session_state:
            return st.session_state.history
        return []


    # Handle one chat submission: render the user turn, execute the flow with
    # the accumulated chat history, then render and record the assistant turn.
    def submit(**kwargs) -> None:
        st.session_state.messages.append(("user", kwargs))
        session_state_history = dict()
        session_state_history.update({"inputs": kwargs})
        with container:
            render_message("user", kwargs)
        # Append chat history to kwargs
        response = run_flow({'{{chat_history_input_name}}': get_chat_history_from_session(), **kwargs})
        # Get base64 for multi modal object
        resolved_outputs = invoker._convert_multimedia_data_to_base64(response)
        st.session_state.messages.append(("assistant", resolved_outputs))
        session_state_history.update({"outputs": response.output})
        st.session_state.history.append(session_state_history)
        invoker._dump_invoke_result(response)
        with container:
            render_message("assistant", resolved_outputs)


    # Lazily construct the FlowInvoker (first call only) and execute one line.
    # NOTE(review): os.chdir changes the working directory of the whole process.
    def run_flow(data: dict) -> dict:
        global invoker
        if not invoker:
            flow = Path('{{flow_path}}')
            dump_path = Path('{{flow_path}}').parent
            if flow.is_dir():
                os.chdir(flow)
            else:
                os.chdir(flow.parent)
            invoker = FlowInvoker(flow, connection_provider="""{{ connection_provider }}""", dump_to=dump_path)
        result = invoker._invoke(data)
        return result

    image = Image.open(Path(__file__).parent / "logo.png")
    st.set_page_config(
        layout="wide",
        page_title="{{flow_name}} - Promptflow App",
        page_icon=image,
        menu_items={
            'About': """
            # This is a Promptflow App.

            You can refer to [promptflow](https://github.com/microsoft/promptflow) for more information.
            """
        }
    )
    # Set primary button color here since button color of the same form need to be identical in streamlit, but we only need Run/Chat button to be blue.
    st.config.set_option("theme.primaryColor", "#0F6CBD")
    st.title("{{flow_name}}")
    st.divider()
    st.chat_message("assistant").write("Hello, please input following flow inputs.")
    container = st.container()
    with container:
        show_conversation()

    # Input form: the widget type is chosen at template-expansion time from the
    # chat input's declared type (rich-text editor for lists, text box otherwise).
    with st.form(key='input_form', clear_on_submit=True):
        st.text('{{chat_input_name}}')
        {% if chat_input_value_type == "list" %}
        {{chat_input_name}} = st_quill(html=True, toolbar=["image"], key='{{chat_input_name}}', placeholder='Please enter the list values and use the image icon to upload a picture. Make sure to format each list item correctly with line breaks')
        {% elif chat_input_value_type == "string" %}
        {{chat_input_name}} = st.text_input(label='{{chat_input_name}}', placeholder='{{chat_input_default_value}}')
        {% else %}
        {{chat_input_name}} = st.text_input(label='{{chat_input_name}}', placeholder={{chat_input_default_value}})
        {% endif %}

        cols = st.columns(7)
        submit_bt = cols[0].form_submit_button(label='Chat', type='primary')
        clear_bt = cols[1].form_submit_button(label='Clear')

    if submit_bt:
        with st.spinner("Loading..."):
            {% if chat_input_value_type == "list" %}
            {{chat_input_name}} = parse_list_from_html({{chat_input_name}})
            {% endif %}
            submit({{flow_inputs_params}})

    if clear_bt:
        with st.spinner("Cleaning..."):
            clear_chat()
            st.rerun()

if __name__ == "__main__":
    start()
Loading

0 comments on commit c6a16a3

Please sign in to comment.