diff --git a/.github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml b/.github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml new file mode 100644 index 00000000000..de28a68a527 --- /dev/null +++ b/.github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml @@ -0,0 +1,89 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_flows_standard_conditional_flow_for_if_else +on: + schedule: + - cron: "8 21 * * *" # Every day starting at 5:8 BJT + pull_request: + branches: [ main ] + paths: [ examples/flows/standard/conditional-flow-for-if-else/**, examples/*requirements.txt, .github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml ] + workflow_dispatch: + +jobs: + samples_readme_ci: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Generate config.json + run: echo ${{ secrets.TEST_WORKSPACE_CONFIG_JSON }} > ${{ github.workspace }}/examples/config.json + - name: Prepare requirements + working-directory: examples + run: | + if [[ -e requirements.txt ]]; then + python -m pip install --upgrade pip + pip install -r requirements.txt + fi + - name: Prepare dev requirements + working-directory: examples + run: | + python -m pip install --upgrade pip + pip install -r dev_requirements.txt + - name: Refine .env file + working-directory: examples/flows/standard/conditional-flow-for-if-else + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e 
"s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + - name: Create run.yml + working-directory: examples/flows/standard/conditional-flow-for-if-else + run: | + gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }} + gpt_base=$(echo ${gpt_base//\//\\/}) + if [[ -e run.yml ]]; then + sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml + fi + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Extract Steps examples/flows/standard/conditional-flow-for-if-else/README.md + working-directory: ${{ github.workspace }} + run: | + python scripts/readme/extract_steps_from_readme.py -f examples/flows/standard/conditional-flow-for-if-else/README.md -o examples/flows/standard/conditional-flow-for-if-else + - name: Cat script + working-directory: examples/flows/standard/conditional-flow-for-if-else + run: | + cat bash_script.sh + - name: Run scripts + working-directory: examples/flows/standard/conditional-flow-for-if-else + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME }} + bash bash_script.sh + - name: Pip List for Debug + if : ${{ always() }} + working-directory: examples/flows/standard/conditional-flow-for-if-else + run: | + pip list + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/flows/standard/conditional-flow-for-if-else/bash_script.sh \ No newline at end of file diff --git a/examples/README.md b/examples/README.md index 2b6928d41b0..1f7332277c3 100644 --- a/examples/README.md +++ b/examples/README.md @@ -41,6 +41,7 @@ | [basic](flows/standard/basic/README.md) | 
[![samples_flows_standard_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_basic.yml) | A basic standard flow using custom python tool that calls Azure OpenAI with connection info stored in environment variables | | [basic-with-builtin-llm](flows/standard/basic-with-builtin-llm/README.md) | [![samples_flows_standard_basic_with_builtin_llm](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_basic_with_builtin_llm.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_basic_with_builtin_llm.yml) | A basic standard flow that calls Azure OpenAI with builtin llm tool | | [basic-with-connection](flows/standard/basic-with-connection/README.md) | [![samples_flows_standard_basic_with_connection](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_basic_with_connection.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_basic_with_connection.yml) | A basic standard flow that using custom python tool calls Azure OpenAI with connection info stored in custom connection | +| [conditional-flow-for-if-else](flows/standard/conditional-flow-for-if-else/README.md) | [![samples_flows_standard_conditional_flow_for_if_else](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_conditional_flow_for_if_else.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_conditional_flow_for_if_else.yml) | This example is a conditional flow for if-else scenario | | [customer-intent-extraction](flows/standard/customer-intent-extraction/README.md) |
[![samples_flows_standard_customer_intent_extraction](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_customer_intent_extraction.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_customer_intent_extraction.yml) | This sample is using OpenAI chat model(ChatGPT/GPT4) to identify customer intent from customer's question | | [flow-with-additional-includes](flows/standard/flow-with-additional-includes/README.md) | [![samples_flows_standard_flow_with_additional_includes](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_additional_includes.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_additional_includes.yml) | User sometimes need to reference some common files or folders, this sample demos how to solve the problem using additional_includes | | [flow-with-symlinks](flows/standard/flow-with-symlinks/README.md) | [![samples_flows_standard_flow_with_symlinks](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_symlinks.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_symlinks.yml) | User sometimes need to reference some common files or folders, this sample demos how to solve the problem using symlinks | diff --git a/examples/flows/standard/conditional-flow-for-if-else/README.md b/examples/flows/standard/conditional-flow-for-if-else/README.md new file mode 100644 index 00000000000..da887fbe644 --- /dev/null +++ b/examples/flows/standard/conditional-flow-for-if-else/README.md @@ -0,0 +1,57 @@ +# Conditional flow for if-else scenario + +This example is a conditional flow for if-else scenario. + +In this flow, it checks if an input query passes content safety check. 
If it's denied, we'll return a default response; otherwise, we'll call LLM to get a response and then summarize the final results. + +The following are two execution situations of this flow: +- if input query passes content safety check: + ![content_safety_check_passed](content_safety_check_passed.png) +- else: + ![content_safety_check_failed](content_safety_check_failed.png) + +**Notice**: The `content_safety_check` and `llm_result` nodes in this flow are dummy nodes that do not actually use the content safety tool and LLM tool. You can replace them with the real ones. Learn more: [LLM Tool](https://microsoft.github.io/promptflow/reference/tools-reference/llm-tool.html) + +By following this example, you will learn how to create a conditional flow using the `activate config`. + +## Prerequisites + +Install promptflow sdk and other dependencies: +```bash +pip install -r requirements.txt +``` + +## Run flow + +- Test flow +```bash +# test with default input value in flow.dag.yaml +pf flow test --flow . + +# test with flow inputs +pf flow test --flow . --inputs question="What is Prompt flow?" +``` + +- Create run with multiple lines of data +```bash +# create a random run name +run_name="conditional_flow_for_if_else_"$(openssl rand -hex 12) + +# create run +pf run create --flow . 
--data ./data.jsonl --stream --name $run_name +``` + +- List and show run metadata +```bash +# list created run +pf run list + +# show specific run detail +pf run show --name $run_name + +# show output +pf run show-details --name $run_name + +# visualize run in browser +pf run visualize --name $run_name +``` diff --git a/examples/flows/standard/conditional-flow-for-if-else/content_safety_check.py b/examples/flows/standard/conditional-flow-for-if-else/content_safety_check.py new file mode 100644 index 00000000000..79516e69fab --- /dev/null +++ b/examples/flows/standard/conditional-flow-for-if-else/content_safety_check.py @@ -0,0 +1,8 @@ +from promptflow import tool +import random + + +@tool +def content_safety_check(text: str) -> bool: + # You can use a content safety node to replace this tool. + return random.choice([True, False]) diff --git a/examples/flows/standard/conditional-flow-for-if-else/content_safety_check_failed.png b/examples/flows/standard/conditional-flow-for-if-else/content_safety_check_failed.png new file mode 100644 index 00000000000..7195bf8e2e3 Binary files /dev/null and b/examples/flows/standard/conditional-flow-for-if-else/content_safety_check_failed.png differ diff --git a/examples/flows/standard/conditional-flow-for-if-else/content_safety_check_passed.png b/examples/flows/standard/conditional-flow-for-if-else/content_safety_check_passed.png new file mode 100644 index 00000000000..a0ab31854f5 Binary files /dev/null and b/examples/flows/standard/conditional-flow-for-if-else/content_safety_check_passed.png differ diff --git a/examples/flows/standard/conditional-flow-for-if-else/data.jsonl b/examples/flows/standard/conditional-flow-for-if-else/data.jsonl new file mode 100644 index 00000000000..9409a94d303 --- /dev/null +++ b/examples/flows/standard/conditional-flow-for-if-else/data.jsonl @@ -0,0 +1,2 @@ +{"question": "What is Prompt flow?"} +{"question": "What is ChatGPT?"} \ No newline at end of file diff --git
a/examples/flows/standard/conditional-flow-for-if-else/default_result.py b/examples/flows/standard/conditional-flow-for-if-else/default_result.py new file mode 100644 index 00000000000..a4b547f337b --- /dev/null +++ b/examples/flows/standard/conditional-flow-for-if-else/default_result.py @@ -0,0 +1,6 @@ +from promptflow import tool + + +@tool +def default_result(question: str) -> str: + return f"I'm not familiar with your query: {question}." diff --git a/examples/flows/standard/conditional-flow-for-if-else/flow.dag.yaml b/examples/flows/standard/conditional-flow-for-if-else/flow.dag.yaml new file mode 100644 index 00000000000..909bb8234a9 --- /dev/null +++ b/examples/flows/standard/conditional-flow-for-if-else/flow.dag.yaml @@ -0,0 +1,47 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json +inputs: + question: + type: string + default: What is Prompt flow? +outputs: + answer: + type: string + reference: ${generate_result.output} +nodes: +- name: content_safety_check + type: python + source: + type: code + path: content_safety_check.py + inputs: + text: ${inputs.question} +- name: llm_result + type: python + source: + type: code + path: llm_result.py + inputs: + question: ${inputs.question} + activate: + when: ${content_safety_check.output} + is: true +- name: default_result + type: python + source: + type: code + path: default_result.py + inputs: + question: ${inputs.question} + activate: + when: ${content_safety_check.output} + is: false +- name: generate_result + type: python + source: + type: code + path: generate_result.py + inputs: + llm_result: ${llm_result.output} + default_result: ${default_result.output} +environment: + python_requirements_txt: requirements.txt diff --git a/examples/flows/standard/conditional-flow-for-if-else/generate_result.py b/examples/flows/standard/conditional-flow-for-if-else/generate_result.py new file mode 100644 index 00000000000..c238605bc22 --- /dev/null +++ 
b/examples/flows/standard/conditional-flow-for-if-else/generate_result.py @@ -0,0 +1,9 @@ +from promptflow import tool + + +@tool +def generate_result(llm_result="", default_result="") -> str: + if llm_result: + return llm_result + else: + return default_result diff --git a/examples/flows/standard/conditional-flow-for-if-else/llm_result.py b/examples/flows/standard/conditional-flow-for-if-else/llm_result.py new file mode 100644 index 00000000000..82b1910cc9e --- /dev/null +++ b/examples/flows/standard/conditional-flow-for-if-else/llm_result.py @@ -0,0 +1,10 @@ +from promptflow import tool + + +@tool +def llm_result(question: str) -> str: + # You can use an LLM node to replace this tool. + return ( + "Prompt flow is a suite of development tools designed to streamline " + "the end-to-end development cycle of LLM-based AI applications." + ) diff --git a/examples/flows/standard/conditional-flow-for-if-else/requirements.txt b/examples/flows/standard/conditional-flow-for-if-else/requirements.txt new file mode 100644 index 00000000000..34d068f5f1c --- /dev/null +++ b/examples/flows/standard/conditional-flow-for-if-else/requirements.txt @@ -0,0 +1,2 @@ +promptflow +promptflow-tools \ No newline at end of file