[Flow test] Resolve base64 in flow details to image path (#949)
# Description
Bug: the Streamlit chat app records images in the chat history as base64, so the dumped test details contain the raw encoded payload instead of an image path.

![image](https://github.com/microsoft/promptflow/assets/17938940/2b2c9085-ab3f-48f0-81ee-5a55638d1672)

Expected:
The image path is shown in the test details.

Fix:
Convert all base64 multimedia in the test details to file paths.
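Conceptually, the conversion walks the output structure and rewrites promptflow-style multimedia dicts (e.g. `{"data:image/png;base64": "<payload>"}`) into path references once the bytes are written to disk. A minimal sketch of that idea, assuming the one-key dict convention; the helper name is illustrative, and the SDK's own `persist_multimedia_data` does the real work:

```python
import base64
import hashlib
from pathlib import Path

def base64_to_path(value, output_dir: Path):
    """Illustrative sketch: persist base64 image payloads, return path refs."""
    if isinstance(value, dict):
        for key, payload in value.items():
            # promptflow-style multimedia is a one-key dict such as
            # {"data:image/png;base64": "<payload>"}.
            if key.startswith("data:image/") and key.endswith(";base64"):
                ext = key[len("data:image/"):-len(";base64")]
                raw = base64.b64decode(payload)
                file_name = hashlib.sha1(raw).hexdigest()[:8] + "." + ext
                output_dir.mkdir(parents=True, exist_ok=True)
                (output_dir / file_name).write_bytes(raw)
                return {key.replace(";base64", ";path"): file_name}
        return {k: base64_to_path(v, output_dir) for k, v in value.items()}
    if isinstance(value, list):
        return [base64_to_path(v, output_dir) for v in value]
    return value
```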

# All Promptflow Contribution checklist:
- [ ] **The pull request does not introduce [breaking changes].**
- [ ] **CHANGELOG is updated for new features, bug fixes or other
significant changes.**
- [ ] **I have read the [contribution guidelines](../CONTRIBUTING.md).**
- [ ] **Create an issue and link to the pull request to get dedicated
review from promptflow team. Learn more: [suggested
workflow](../CONTRIBUTING.md#suggested-workflow).**

## General Guidelines and Best Practices
- [ ] Title of the pull request is clear and informative.
- [ ] There are a small number of commits, each of which has an informative message. This means that previously merged commits do not appear in the history of the PR. For more information on cleaning up the commits in your PR, [see this page](https://github.com/Azure/azure-powershell/blob/master/documentation/development-docs/cleaning-up-commits.md).

### Testing Guidelines
- [ ] Pull request includes test coverage for the included changes.
lalala123123 authored Oct 31, 2023 · 1 parent ea9bfa7 · commit f47660e

Showing 3 changed files with 62 additions and 17 deletions.
16 changes: 15 additions & 1 deletion src/promptflow/promptflow/_cli/_pf/_init_entry_generators.py

```diff
@@ -234,10 +234,21 @@ def __init__(self, flow_name, flow_dag_path, connection_provider):
         self.is_chat_flow, self.chat_history_input_name, error_msg = FlowOperations._is_chat_flow(self.executable)
         if not self.is_chat_flow:
             raise UserErrorException(f"Only support chat flow in ui mode, {error_msg}.")
+        self._chat_input_name = next(
+            (flow_input for flow_input, value in self.executable.inputs.items() if value.is_chat_input), None)
+        self._chat_input = self.executable.inputs[self._chat_input_name]
+
+    @property
+    def chat_input_default_value(self):
+        return self._chat_input.default
+
+    @property
+    def chat_input_value_type(self):
+        return self._chat_input.type
 
     @property
     def chat_input_name(self):
-        return next((flow_input for flow_input, value in self.executable.inputs.items() if value.is_chat_input), None)
+        return self._chat_input_name
 
     @property
     def flow_inputs_params(self):
@@ -261,6 +272,9 @@ def entry_template_keys(self):
             "is_chat_flow",
             "chat_history_input_name",
             "connection_provider",
+            "chat_input_default_value",
+            "chat_input_value_type",
+            "chat_input_name",
         ]
 
     def generate_to_file(self, target):
```
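The new `chat_input_default_value` and `chat_input_value_type` properties piggyback on the existing `entry_template_keys` mechanism: every key listed there is looked up on the generator and handed to the template. A rough sketch of that flow, assuming a Jinja2-style `generate_to_file` (the actual lookup lives in the shared generator base class):

```python
from pathlib import Path

from jinja2 import Template

def generate_to_file(generator, template_path: Path, target: Path) -> None:
    # Each entry in entry_template_keys names a property on the generator;
    # collect them into the template's rendering context.
    context = {key: getattr(generator, key) for key in generator.entry_template_keys}
    rendered = Template(template_path.read_text()).render(**context)
    target.write_text(rendered)
```

With `chat_input_value_type` in the context, the Streamlit template further down can branch between a rich-text editor for `list` inputs and a plain text box for `string` inputs.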
44 changes: 32 additions & 12 deletions src/promptflow/promptflow/_sdk/_serving/flow_invoker.py

```diff
@@ -104,14 +104,14 @@ def _init_executor(self):
         self.executor.enable_streaming_for_llm_flow(self.streaming)
         logger.info("Promptflow executor initiated successfully.")
 
-    def invoke(self, data: dict):
+    def _invoke(self, data: dict):
         """
         Process a flow request in the runtime.
         :param data: The request data dict with flow input as keys, for example: {"question": "What is ChatGPT?"}.
         :type data: dict
-        :return: The flow output dict, for example: {"answer": "ChatGPT is a chatbot."}.
-        :rtype: dict
+        :return: The result of executor.
+        :rtype: ~promptflow.executor._result.LineResult
         """
         logger.info(f"PromptFlow invoker received data: {data}")
 
@@ -121,17 +121,37 @@ def invoke(self, data: dict):
         # Pass index 0 as extension require for dumped result.
         # TODO: Remove this index after extension remove this requirement.
         result = self.executor.exec_line(data, index=0, allow_generator_output=self.streaming())
+        if LINE_NUMBER_KEY in result.output:
+            # Remove line number from output
+            del result.output[LINE_NUMBER_KEY]
+        return result
+
+    def invoke(self, data: dict):
+        """
+        Process a flow request in the runtime and return the output of the executor.
+        :param data: The request data dict with flow input as keys, for example: {"question": "What is ChatGPT?"}.
+        :type data: dict
+        :return: The flow output dict, for example: {"answer": "ChatGPT is a chatbot."}.
+        :rtype: dict
+        """
+        result = self._invoke(data)
+        # Get base64 for multi modal object
+        resolved_outputs = self._convert_multimedia_data_to_base64(result)
+        self._dump_invoke_result(result)
+        print_yellow_warning(f"Result: {result.output}")
+        return resolved_outputs
+
+    def _convert_multimedia_data_to_base64(self, invoke_result):
         resolved_outputs = {
-            k: convert_multimedia_data_to_base64(v, with_type=True, dict_type=True) for k, v in result.output.items()
+            k: convert_multimedia_data_to_base64(v, with_type=True, dict_type=True)
+            for k, v in invoke_result.output.items()
         }
+        return resolved_outputs
+
+    def _dump_invoke_result(self, invoke_result):
         if self._dump_to:
-            result.output = persist_multimedia_data(
-                result.output, base_dir=self._dump_to, sub_dir=Path(".promptflow/output")
+            invoke_result.output = persist_multimedia_data(
+                invoke_result.output, base_dir=self._dump_to, sub_dir=Path(".promptflow/output")
             )
-            dump_flow_result(flow_folder=self._dump_to, flow_result=result, prefix=self._dump_file_prefix)
-        print_yellow_warning(f"Result: {result.output}")
-        if LINE_NUMBER_KEY in resolved_outputs:
-            # Remove line number from output
-            del resolved_outputs[LINE_NUMBER_KEY]
-        return resolved_outputs
+            dump_flow_result(flow_folder=self._dump_to, flow_result=invoke_result, prefix=self._dump_file_prefix)
```
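The split keeps `invoke` as the public entry point (a plain output dict with multimedia inlined as base64), while `_invoke` returns the raw `LineResult` so callers such as the generated Streamlit app can resolve and dump multimedia on their own schedule. A hedged usage sketch; the flow path and input name are placeholders:

```python
from promptflow._sdk._serving.flow_invoker import FlowInvoker

invoker = FlowInvoker("my_chat_flow", connection_provider="local", dump_to="my_chat_flow")

# Public path: flow output dict, multimedia already converted to base64.
outputs = invoker.invoke({"question": "What is ChatGPT?"})

# Internal path: full LineResult, so the caller controls resolution and dumping.
line_result = invoker._invoke({"question": "What is ChatGPT?"})
resolved = invoker._convert_multimedia_data_to_base64(line_result)
invoker._dump_invoke_result(line_result)  # persists images under .promptflow/output
```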
15 changes: 15 additions & 4 deletions in the generated Streamlit chat app template (file path not shown in this view)

```diff
@@ -44,11 +44,14 @@ def start():
     render_message("user", kwargs)
     # Append chat history to kwargs
     response = run_flow({'{{chat_history_input_name}}': get_chat_history_from_session(), **kwargs})
-    st.session_state.messages.append(("assistant", response))
-    session_state_history.update({"outputs": response})
+    # Get base64 for multi modal object
+    resolved_outputs = invoker._convert_multimedia_data_to_base64(response)
+    st.session_state.messages.append(("assistant", resolved_outputs))
+    session_state_history.update({"outputs": response.output})
     st.session_state.history.append(session_state_history)
+    invoker._dump_invoke_result(response)
     with container:
-        render_message("assistant", response)
+        render_message("assistant", resolved_outputs)
 
 
 def run_flow(data: dict) -> dict:
@@ -61,7 +64,7 @@ def start():
     else:
         os.chdir(flow.parent)
     invoker = FlowInvoker(flow, connection_provider="""{{ connection_provider }}""", dump_to=dump_path)
-    result = invoker.invoke(data)
+    result = invoker._invoke(data)
     return result
 
 image = Image.open(Path(__file__).parent / "logo.png")
@@ -88,15 +91,23 @@ def start():
 with st.form(key='input_form', clear_on_submit=True):
     st.text('{{chat_input_name}}')
+    {% if chat_input_value_type == "list" %}
+    {{chat_input_name}} = st_quill(html=True, toolbar=["image"], key='{{chat_input_name}}', placeholder='Please enter the list values and use the image icon to upload a picture. Make sure to format each list item correctly with line breaks')
+    {% elif chat_input_value_type == "string" %}
     {{chat_input_name}} = st.text_input(label='{{chat_input_name}}', placeholder='{{chat_input_default_value}}')
+    {% else %}
+    {{chat_input_name}} = st.text_input(label='{{chat_input_name}}', placeholder={{chat_input_default_value}})
+    {% endif %}
 
     cols = st.columns(7)
     submit_bt = cols[0].form_submit_button(label='Chat', type='primary')
     clear_bt = cols[1].form_submit_button(label='Clear')
 
 if submit_bt:
     with st.spinner("Loading..."):
+        {% if chat_input_value_type == "list" %}
+        {{chat_input_name}} = parse_list_from_html({{chat_input_name}})
+        {% endif %}
         submit({{flow_inputs_params}})
 
 if clear_bt:
```
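`st_quill` hands back the editor content as HTML, so `parse_list_from_html` has to turn that markup back into a chat-input list before the flow is called. Its implementation is not part of this diff; one plausible shape, purely as a sketch (tag coverage simplified, assuming Quill wraps each entry in a `<p>` tag):

```python
import re

def parse_list_from_html(html: str) -> list:
    """Sketch: split Quill HTML into text entries and multimedia dicts."""
    entries = []
    for paragraph in re.findall(r"<p>(.*?)</p>", html, flags=re.DOTALL):
        # Uploaded images arrive as <img src="data:image/...;base64,...">.
        img = re.search(r'<img[^>]+src="data:(image/[^;]+);base64,([^"]+)"', paragraph)
        if img:
            mime, payload = img.groups()
            entries.append({f"data:{mime};base64": payload})
            continue
        text = re.sub(r"<[^>]+>", "", paragraph).strip()
        if text:
            entries.append(text)
    return entries
```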
