diff --git a/.cspell.json b/.cspell.json
index eb928335f59..b176301cc7d 100644
--- a/.cspell.json
+++ b/.cspell.json
@@ -24,6 +24,9 @@
     "**/*.xml",
     "**/*.txt",
     ".gitignore",
+    "examples/README.md",
+    "examples/flex-flows/README.md",
+    "examples/prompty/README.md",
     "scripts/docs/_build/**",
     "src/promptflow-azure/promptflow/azure/_restclient/flow/**",
     "src/promptflow-azure/promptflow/azure/_restclient/swagger.json",
@@ -188,6 +191,8 @@
     "otel",
     "OTLP",
     "spawnv",
+    "arxiv",
+    "autogen",
     "spawnve",
     "addrs",
     "pywin",
diff --git a/.github/workflows/samples_connections.yml b/.github/workflows/samples_connections.yml
index 3f39600de11..9ff9e437f1e 100644
--- a/.github/workflows/samples_connections.yml
+++ b/.github/workflows/samples_connections.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/connections
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_connections_connection.yml b/.github/workflows/samples_connections_connection.yml
index 92333dd5f81..b76af61d909 100644
--- a/.github/workflows/samples_connections_connection.yml
+++ b/.github/workflows/samples_connections_connection.yml
@@ -34,6 +34,22 @@ jobs:
         python -m pip install --upgrade pip
         pip install -r ${{ github.workspace }}/examples/requirements.txt
         pip install -r ${{ github.workspace }}/examples/dev_requirements.txt
+    - name: setup .env file
+      working-directory: examples/connections
+      run: |
+        AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
+        if [[ -e .env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
+          mv .env.example .env
+        fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create Aoai Connection
       run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}"
     - name: Test Notebook
diff --git a/.github/workflows/samples_flex_flows_basic.yml b/.github/workflows/samples_flex_flows_basic.yml
new file mode 100644
index 00000000000..f7d47c03461
--- /dev/null
+++ b/.github/workflows/samples_flex_flows_basic.yml
@@ -0,0 +1,110 @@
+# This code is autogenerated.
+# Code is generated by running custom script: python3 readme.py
+# Any manual changes to this file may cause incorrect behavior.
+# Any manual changes will be overwritten if the code is regenerated.
+
+name: samples_flex_flows_basic
+on:
+  schedule:
+  - cron: "30 20 * * *" # Every day starting at 4:30 BJT
+  pull_request:
+    branches: [ main ]
+    paths: [ examples/flex-flows/basic/**, examples/*requirements.txt, .github/workflows/samples_flex_flows_basic.yml ]
+  workflow_dispatch:
+
+env:
+  IS_IN_CI_PIPELINE: "true"
+
+jobs:
+  samples_flex_flows_basic:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+    - name: Setup Python 3.9 environment
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.9"
+    - name: Prepare requirements
+      working-directory: examples
+      run: |
+        if [[ -e requirements.txt ]]; then
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+        fi
+    - name: Prepare dev requirements
+      working-directory: examples
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r dev_requirements.txt
+    - name: Refine .env file
+      working-directory: examples/flex-flows/basic
+      run: |
+        AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
+        if [[ -e .env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
+          mv .env.example .env
+        fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
+    - name: Create run.yml
+      working-directory: examples/flex-flows/basic
+      run: |
+        gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        gpt_base=$(echo ${gpt_base//\//\\/})
+        if [[ -e run.yml ]]; then
+          sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml
+        fi
+    - name: Azure Login
+      uses: azure/login@v1
+      with:
+        creds: ${{ secrets.AZURE_CREDENTIALS }}
+    - name: Extract Steps examples/flex-flows/basic/README.md
+      working-directory: ${{ github.workspace }}
+      run: |
+        python scripts/readme/extract_steps_from_readme.py -f examples/flex-flows/basic/README.md -o examples/flex-flows/basic
+    - name: Cat script
+      working-directory: examples/flex-flows/basic
+      run: |
+        cat bash_script.sh
+    - name: Run scripts against canary workspace (scheduled runs only)
+      if: github.event_name == 'schedule'
+      working-directory: examples/flex-flows/basic
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
+        bash bash_script.sh
+    - name: Run scripts against production workspace
+      if: github.event_name != 'schedule'
+      working-directory: examples/flex-flows/basic
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
+        bash bash_script.sh
+    - name: Pip List for Debug
+      if: ${{ always() }}
+      working-directory: examples/flex-flows/basic
+      run: |
+        pip list
+    - name: Upload artifact
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v3
+      with:
+        name: artifact
+        path: examples/flex-flows/basic/bash_script.sh
\ No newline at end of file
diff --git a/.github/workflows/samples_flex_flows_chat_basic.yml b/.github/workflows/samples_flex_flows_chat_basic.yml
new file mode 100644
index 00000000000..fd7ff158a73
--- /dev/null
+++ b/.github/workflows/samples_flex_flows_chat_basic.yml
@@ -0,0 +1,110 @@
+# This code is autogenerated.
+# Code is generated by running custom script: python3 readme.py
+# Any manual changes to this file may cause incorrect behavior.
+# Any manual changes will be overwritten if the code is regenerated.
+
+name: samples_flex_flows_chat_basic
+on:
+  schedule:
+  - cron: "9 20 * * *" # Every day starting at 4:09 BJT
+  pull_request:
+    branches: [ main ]
+    paths: [ examples/flex-flows/chat-basic/**, examples/*requirements.txt, .github/workflows/samples_flex_flows_chat_basic.yml ]
+  workflow_dispatch:
+
+env:
+  IS_IN_CI_PIPELINE: "true"
+
+jobs:
+  samples_flex_flows_chat_basic:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+    - name: Setup Python 3.9 environment
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.9"
+    - name: Prepare requirements
+      working-directory: examples
+      run: |
+        if [[ -e requirements.txt ]]; then
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+        fi
+    - name: Prepare dev requirements
+      working-directory: examples
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r dev_requirements.txt
+    - name: Refine .env file
+      working-directory: examples/flex-flows/chat-basic
+      run: |
+        AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
+        if [[ -e .env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
+          mv .env.example .env
+        fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
+    - name: Create run.yml
+      working-directory: examples/flex-flows/chat-basic
+      run: |
+        gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        gpt_base=$(echo ${gpt_base//\//\\/})
+        if [[ -e run.yml ]]; then
+          sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml
+        fi
+    - name: Azure Login
+      uses: azure/login@v1
+      with:
+        creds: ${{ secrets.AZURE_CREDENTIALS }}
+    - name: Extract Steps examples/flex-flows/chat-basic/README.md
+      working-directory: ${{ github.workspace }}
+      run: |
+        python scripts/readme/extract_steps_from_readme.py -f examples/flex-flows/chat-basic/README.md -o examples/flex-flows/chat-basic
+    - name: Cat script
+      working-directory: examples/flex-flows/chat-basic
+      run: |
+        cat bash_script.sh
+    - name: Run scripts against canary workspace (scheduled runs only)
+      if: github.event_name == 'schedule'
+      working-directory: examples/flex-flows/chat-basic
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
+        bash bash_script.sh
+    - name: Run scripts against production workspace
+      if: github.event_name != 'schedule'
+      working-directory: examples/flex-flows/chat-basic
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
+        bash bash_script.sh
+    - name: Pip List for Debug
+      if: ${{ always() }}
+      working-directory: examples/flex-flows/chat-basic
+      run: |
+        pip list
+    - name: Upload artifact
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v3
+      with:
+        name: artifact
+        path: examples/flex-flows/chat-basic/bash_script.sh
\ No newline at end of file
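A note on the `Refine .env file` step that recurs in the workflows above: the endpoint secret is a URL, so its `/` characters would prematurely terminate sed's `s/.../.../g` expressions; the parameter expansion `${AOAI_API_ENDPOINT//\//\\/}` therefore escapes every slash before the value is spliced into the sed replacement. Below is a minimal local sketch of the same substitution; the dummy values and the `<your_AOAI_key>`/`<your_AOAI_endpoint>` placeholder tokens are illustrative assumptions about the `.env.example` contents, not values confirmed by this diff.

#!/usr/bin/env bash
# Illustrative stand-ins; the real workflows read these from repository secrets.
AOAI_API_KEY="dummy-key"
AOAI_API_ENDPOINT="https://my-resource.openai.azure.com/"

# Escape each "/" so the URL can sit inside a sed replacement string.
AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})

# .env.example is assumed to hold the placeholder tokens named above.
if [[ -e .env.example ]]; then
  sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
  mv .env.example .env
fi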
diff --git a/.github/workflows/samples_flex_flows_eval_checklist.yml b/.github/workflows/samples_flex_flows_eval_checklist.yml
new file mode 100644
index 00000000000..1f765ba6510
--- /dev/null
+++ b/.github/workflows/samples_flex_flows_eval_checklist.yml
@@ -0,0 +1,110 @@
+# This code is autogenerated.
+# Code is generated by running custom script: python3 readme.py
+# Any manual changes to this file may cause incorrect behavior.
+# Any manual changes will be overwritten if the code is regenerated.
+
+name: samples_flex_flows_eval_checklist
+on:
+  schedule:
+  - cron: "56 22 * * *" # Every day starting at 6:56 BJT
+  pull_request:
+    branches: [ main ]
+    paths: [ examples/flex-flows/eval-checklist/**, examples/*requirements.txt, .github/workflows/samples_flex_flows_eval_checklist.yml ]
+  workflow_dispatch:
+
+env:
+  IS_IN_CI_PIPELINE: "true"
+
+jobs:
+  samples_flex_flows_eval_checklist:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+    - name: Setup Python 3.9 environment
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.9"
+    - name: Prepare requirements
+      working-directory: examples
+      run: |
+        if [[ -e requirements.txt ]]; then
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+        fi
+    - name: Prepare dev requirements
+      working-directory: examples
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r dev_requirements.txt
+    - name: Refine .env file
+      working-directory: examples/flex-flows/eval-checklist
+      run: |
+        AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
+        if [[ -e .env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
+          mv .env.example .env
+        fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
+    - name: Create run.yml
+      working-directory: examples/flex-flows/eval-checklist
+      run: |
+        gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        gpt_base=$(echo ${gpt_base//\//\\/})
+        if [[ -e run.yml ]]; then
+          sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml
+        fi
+    - name: Azure Login
+      uses: azure/login@v1
+      with:
+        creds: ${{ secrets.AZURE_CREDENTIALS }}
+    - name: Extract Steps examples/flex-flows/eval-checklist/README.md
+      working-directory: ${{ github.workspace }}
+      run: |
+        python scripts/readme/extract_steps_from_readme.py -f examples/flex-flows/eval-checklist/README.md -o examples/flex-flows/eval-checklist
+    - name: Cat script
+      working-directory: examples/flex-flows/eval-checklist
+      run: |
+        cat bash_script.sh
+    - name: Run scripts against canary workspace (scheduled runs only)
+      if: github.event_name == 'schedule'
+      working-directory: examples/flex-flows/eval-checklist
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
+        bash bash_script.sh
+    - name: Run scripts against production workspace
+      if: github.event_name != 'schedule'
+      working-directory: examples/flex-flows/eval-checklist
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
+        bash bash_script.sh
+    - name: Pip List for Debug
+      if: ${{ always() }}
+      working-directory: examples/flex-flows/eval-checklist
+      run: |
+        pip list
+    - name: Upload artifact
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v3
+      with:
+        name: artifact
+        path: examples/flex-flows/eval-checklist/bash_script.sh
\ No newline at end of file
diff --git a/.github/workflows/samples_flex_flows_eval_code_quality.yml b/.github/workflows/samples_flex_flows_eval_code_quality.yml
new file mode 100644
index 00000000000..ce03b9bbaaf
--- /dev/null
+++ b/.github/workflows/samples_flex_flows_eval_code_quality.yml
@@ -0,0 +1,110 @@
+# This code is autogenerated.
+# Code is generated by running custom script: python3 readme.py
+# Any manual changes to this file may cause incorrect behavior.
+# Any manual changes will be overwritten if the code is regenerated.
+
+name: samples_flex_flows_eval_code_quality
+on:
+  schedule:
+  - cron: "6 22 * * *" # Every day starting at 6:06 BJT
+  pull_request:
+    branches: [ main ]
+    paths: [ examples/flex-flows/eval-code-quality/**, examples/*requirements.txt, .github/workflows/samples_flex_flows_eval_code_quality.yml ]
+  workflow_dispatch:
+
+env:
+  IS_IN_CI_PIPELINE: "true"
+
+jobs:
+  samples_flex_flows_eval_code_quality:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+    - name: Setup Python 3.9 environment
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.9"
+    - name: Prepare requirements
+      working-directory: examples
+      run: |
+        if [[ -e requirements.txt ]]; then
+          python -m pip install --upgrade pip
+          pip install -r requirements.txt
+        fi
+    - name: Prepare dev requirements
+      working-directory: examples
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r dev_requirements.txt
+    - name: Refine .env file
+      working-directory: examples/flex-flows/eval-code-quality
+      run: |
+        AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
+        if [[ -e .env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
+          mv .env.example .env
+        fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
+    - name: Create run.yml
+      working-directory: examples/flex-flows/eval-code-quality
+      run: |
+        gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        gpt_base=$(echo ${gpt_base//\//\\/})
+        if [[ -e run.yml ]]; then
+          sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml
+        fi
+    - name: Azure Login
+      uses: azure/login@v1
+      with:
+        creds: ${{ secrets.AZURE_CREDENTIALS }}
+    - name: Extract Steps examples/flex-flows/eval-code-quality/README.md
+      working-directory: ${{ github.workspace }}
+      run: |
+        python scripts/readme/extract_steps_from_readme.py -f examples/flex-flows/eval-code-quality/README.md -o examples/flex-flows/eval-code-quality
+    - name: Cat script
+      working-directory: examples/flex-flows/eval-code-quality
+      run: |
+        cat bash_script.sh
+    - name: Run scripts against canary workspace (scheduled runs only)
+      if: github.event_name == 'schedule'
+      working-directory: examples/flex-flows/eval-code-quality
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
+        bash bash_script.sh
+    - name: Run scripts against production workspace
+      if: github.event_name != 'schedule'
+      working-directory: examples/flex-flows/eval-code-quality
+      run: |
+        export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
+        export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
+        export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
+        export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
+        bash bash_script.sh
+    - name: Pip List for Debug
+      if: ${{ always() }}
+      working-directory: examples/flex-flows/eval-code-quality
+      run: |
+        pip list
+    - name: Upload artifact
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v3
+      with:
+        name: artifact
+        path: examples/flex-flows/eval-code-quality/bash_script.sh
\ No newline at end of file
diff --git a/.github/workflows/samples_flexflows_basic_flexflowquickstart.yml b/.github/workflows/samples_flexflows_basic_flexflowquickstart.yml
new file mode 100644
index 00000000000..f3b9ade3963
--- /dev/null
+++ b/.github/workflows/samples_flexflows_basic_flexflowquickstart.yml
@@ -0,0 +1,64 @@
+# This code is autogenerated.
+# Code is generated by running custom script: python3 readme.py
+# Any manual changes to this file may cause incorrect behavior.
+# Any manual changes will be overwritten if the code is regenerated.
+
+name: samples_flexflows_basic_flexflowquickstart
+on:
+  schedule:
+  - cron: "55 20 * * *" # Every day starting at 4:55 BJT
+  pull_request:
+    branches: [ main ]
+    paths: [ examples/flex-flows/basic/**, examples/*requirements.txt, .github/workflows/samples_flexflows_basic_flexflowquickstart.yml ]
+  workflow_dispatch:
+
+env:
+  IS_IN_CI_PIPELINE: "true"
+
+jobs:
+  samples_flexflows_basic_flexflowquickstart:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+    - name: Azure Login
+      uses: azure/login@v1
+      with:
+        creds: ${{ secrets.AZURE_CREDENTIALS }}
+    - name: Setup Python 3.9 environment
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.9"
+    - name: Prepare requirements
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r ${{ github.workspace }}/examples/requirements.txt
+        pip install -r ${{ github.workspace }}/examples/dev_requirements.txt
+    - name: setup .env file
+      working-directory: examples/flex-flows/basic
+      run: |
+        AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/})
+        if [[ -e .env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
+          mv .env.example .env
+        fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
+    - name: Create Aoai Connection
+      run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}"
+    - name: Test Notebook
+      working-directory: examples/flex-flows/basic
+      run: |
+        papermill -k python flex-flow-quickstart.ipynb flex-flow-quickstart.output.ipynb
+    - name: Upload artifact
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v3
+      with:
+        name: artifact
+        path: examples/flex-flows/basic
diff --git a/.github/workflows/samples_flexflows_basic_flexflowquickstartazure.yml b/.github/workflows/samples_flexflows_basic_flexflowquickstartazure.yml
new file mode 100644
index 00000000000..80809522575
--- /dev/null
+++ b/.github/workflows/samples_flexflows_basic_flexflowquickstartazure.yml
@@ -0,0 +1,54 @@
+# This code is autogenerated.
+# Code is generated by running custom script: python3 readme.py
+# Any manual changes to this file may cause incorrect behavior.
+# Any manual changes will be overwritten if the code is regenerated.
+
+name: samples_flexflows_basic_flexflowquickstartazure
+on:
+  schedule:
+  - cron: "10 22 * * *" # Every day starting at 6:10 BJT
+  pull_request:
+    branches: [ main ]
+    paths: [ examples/flex-flows/basic/**, examples/*requirements.txt, .github/workflows/samples_flexflows_basic_flexflowquickstartazure.yml ]
+  workflow_dispatch:
+
+env:
+  IS_IN_CI_PIPELINE: "true"
+
+jobs:
+  samples_flexflows_basic_flexflowquickstartazure:
+    runs-on: ubuntu-latest
+    steps:
+    - name: Checkout repository
+      uses: actions/checkout@v4
+    - name: Generate config.json for canary workspace (scheduled runs only)
+      if: github.event_name == 'schedule'
+      run: echo '${{ secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ github.workspace }}/examples/config.json
+    - name: Generate config.json for production workspace
+      if: github.event_name != 'schedule'
+      run: echo '${{ secrets.EXAMPLE_WORKSPACE_CONFIG_JSON_PROD }}' > ${{ github.workspace }}/examples/config.json
+    - name: Azure Login
+      uses: azure/login@v1
+      with:
+        creds: ${{ secrets.AZURE_CREDENTIALS }}
+    - name: Setup Python 3.9 environment
+      uses: actions/setup-python@v4
+      with:
+        python-version: "3.9"
+    - name: Prepare requirements
+      run: |
+        python -m pip install --upgrade pip
+        pip install -r ${{ github.workspace }}/examples/requirements.txt
+        pip install -r ${{ github.workspace }}/examples/dev_requirements.txt
+    - name: Create Aoai Connection
+      run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}"
+    - name: Test Notebook
+      working-directory: examples/flex-flows/basic
+      run: |
+        papermill -k python flex-flow-quickstart-azure.ipynb flex-flow-quickstart-azure.output.ipynb
+    - name: Upload artifact
+      if: ${{ always() }}
+      uses: actions/upload-artifact@v3
+      with:
+        name: artifact
+        path: examples/flex-flows/basic
diff --git a/.github/workflows/samples_flows_chat_chat_basic.yml b/.github/workflows/samples_flows_chat_chat_basic.yml
index 132b9a33bbe..7bc42561771 100644
--- a/.github/workflows/samples_flows_chat_chat_basic.yml
+++ b/.github/workflows/samples_flows_chat_chat_basic.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/chat/chat-basic
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
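One detail shared by every run step above: the Azure OpenAI credentials are exported twice, under the lowercase `aoai_api_key`/`aoai_api_endpoint` names that the older sample scripts read, and under `AZURE_OPENAI_API_KEY`/`AZURE_OPENAI_ENDPOINT`, the names conventionally read by OpenAI SDK clients targeting Azure. The following is a hedged sketch of a pre-flight check a `bash_script.sh` could run before invoking a sample; this guard is illustrative only and not part of the generated scripts.

#!/usr/bin/env bash
# Fail fast if either naming convention is missing (illustrative only).
for var in aoai_api_key aoai_api_endpoint AZURE_OPENAI_API_KEY AZURE_OPENAI_ENDPOINT; do
  if [[ -z "${!var:-}" ]]; then
    echo "missing required variable: $var" >&2
    exit 1
  fi
done
echo "all Azure OpenAI variables are set"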
diff --git a/.github/workflows/samples_flows_chat_chat_math_variant.yml b/.github/workflows/samples_flows_chat_chat_math_variant.yml
index f9fb7f8c69c..831c4b11c2e 100644
--- a/.github/workflows/samples_flows_chat_chat_math_variant.yml
+++ b/.github/workflows/samples_flows_chat_chat_math_variant.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/chat/chat-math-variant
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_chat_chat_with_pdf.yml b/.github/workflows/samples_flows_chat_chat_with_pdf.yml
index 78544acbfdb..33403cd6741 100644
--- a/.github/workflows/samples_flows_chat_chat_with_pdf.yml
+++ b/.github/workflows/samples_flows_chat_chat_with_pdf.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create AOAI Connection from ENV file
       working-directory: examples/flows/chat/chat-with-pdf
       run: |
@@ -84,6 +89,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -94,6 +101,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_chat_chat_with_wikipedia.yml b/.github/workflows/samples_flows_chat_chat_with_wikipedia.yml
index d4e82bec6e3..fd02f184c7b 100644
--- a/.github/workflows/samples_flows_chat_chat_with_wikipedia.yml
+++ b/.github/workflows/samples_flows_chat_chat_with_wikipedia.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/chat/chat-with-wikipedia
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_chat_use_functions_with_chat_models.yml b/.github/workflows/samples_flows_chat_use_functions_with_chat_models.yml
index e04f820b2fd..b5e4a232996 100644
--- a/.github/workflows/samples_flows_chat_use_functions_with_chat_models.yml
+++ b/.github/workflows/samples_flows_chat_use_functions_with_chat_models.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
      working-directory: examples/flows/chat/use_functions_with_chat_models
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_basic.yml b/.github/workflows/samples_flows_evaluation_eval_basic.yml
index c92f24e02ba..2b3648750d5 100644
--- a/.github/workflows/samples_flows_evaluation_eval_basic.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_basic.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-basic
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_chat_math.yml b/.github/workflows/samples_flows_evaluation_eval_chat_math.yml
index b7881f872d9..b87208f9137 100644
--- a/.github/workflows/samples_flows_evaluation_eval_chat_math.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_chat_math.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-chat-math
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_classification_accuracy.yml b/.github/workflows/samples_flows_evaluation_eval_classification_accuracy.yml
index 109e77b5aee..61c2565810d 100644
--- a/.github/workflows/samples_flows_evaluation_eval_classification_accuracy.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_classification_accuracy.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-classification-accuracy
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_entity_match_rate.yml b/.github/workflows/samples_flows_evaluation_eval_entity_match_rate.yml
index 590bf74bbc0..f9e691ad694 100644
--- a/.github/workflows/samples_flows_evaluation_eval_entity_match_rate.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_entity_match_rate.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-entity-match-rate
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_groundedness.yml b/.github/workflows/samples_flows_evaluation_eval_groundedness.yml
index 25268ffbc9f..071718b41ed 100644
--- a/.github/workflows/samples_flows_evaluation_eval_groundedness.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_groundedness.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-groundedness
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_perceived_intelligence.yml b/.github/workflows/samples_flows_evaluation_eval_perceived_intelligence.yml
index 7cec59b0041..321d8a2616e 100644
--- a/.github/workflows/samples_flows_evaluation_eval_perceived_intelligence.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_perceived_intelligence.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-perceived-intelligence
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_qna_non_rag.yml b/.github/workflows/samples_flows_evaluation_eval_qna_non_rag.yml
index 7f1da8c1228..f05f861c943 100644
--- a/.github/workflows/samples_flows_evaluation_eval_qna_non_rag.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_qna_non_rag.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-qna-non-rag
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_qna_rag_metrics.yml b/.github/workflows/samples_flows_evaluation_eval_qna_rag_metrics.yml
index af18aadb29d..27200689cd8 100644
--- a/.github/workflows/samples_flows_evaluation_eval_qna_rag_metrics.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_qna_rag_metrics.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-qna-rag-metrics
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_evaluation_eval_summarization.yml b/.github/workflows/samples_flows_evaluation_eval_summarization.yml
index b444be8b77d..e99299f3d37 100644
--- a/.github/workflows/samples_flows_evaluation_eval_summarization.yml
+++ b/.github/workflows/samples_flows_evaluation_eval_summarization.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/evaluation/eval-summarization
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_standard_autonomous_agent.yml b/.github/workflows/samples_flows_standard_autonomous_agent.yml
index ec8e9210841..f701be9d708 100644
--- a/.github/workflows/samples_flows_standard_autonomous_agent.yml
+++ b/.github/workflows/samples_flows_standard_autonomous_agent.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/standard/autonomous-agent
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_standard_basic.yml b/.github/workflows/samples_flows_standard_basic.yml
index 5385627f8fc..e5d996919d3 100644
--- a/.github/workflows/samples_flows_standard_basic.yml
+++ b/.github/workflows/samples_flows_standard_basic.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/standard/basic
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
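The `Create run.yml` step repeated above splices literal secret values over connection references of the form `${azure_open_ai_connection.api_key}`: the backslash in `\${` stops the shell from expanding the token as a parameter, so sed receives a literal `$` to match, and the endpoint is slash-escaped into `gpt_base` first, exactly as in the `.env` step. Here is a standalone sketch with dummy values; the run.yml contents below are assumed for illustration and are not taken from this diff.

#!/usr/bin/env bash
# Dummy stand-ins for the repository secrets.
key="dummy-key"
gpt_base="https://my-resource.openai.azure.com/"
gpt_base=$(echo ${gpt_base//\//\\/})  # escape "/" for the sed replacement side

# A run.yml is assumed to reference the connection like this:
printf 'api_key: ${azure_open_ai_connection.api_key}\napi_base: ${azure_open_ai_connection.api_base}\n' > run.yml

# "\$" keeps the shell from expanding ${azure_open_ai_connection...}; sed matches it literally.
sed -i -e "s/\${azure_open_ai_connection.api_key}/$key/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml
cat run.yml  # both fields now hold the literal dummy values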
diff --git a/.github/workflows/samples_flows_standard_basic_with_builtin_llm.yml b/.github/workflows/samples_flows_standard_basic_with_builtin_llm.yml
index 39d4bd19673..80ba5c7d834 100644
--- a/.github/workflows/samples_flows_standard_basic_with_builtin_llm.yml
+++ b/.github/workflows/samples_flows_standard_basic_with_builtin_llm.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/standard/basic-with-builtin-llm
       run: |
@@ -77,6 +82,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -87,6 +94,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_standard_basic_with_connection.yml b/.github/workflows/samples_flows_standard_basic_with_connection.yml
index 6462b2de555..fda39607f8e 100644
--- a/.github/workflows/samples_flows_standard_basic_with_connection.yml
+++ b/.github/workflows/samples_flows_standard_basic_with_connection.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/standard/basic-with-connection
       run: |
@@ -77,6 +82,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -87,6 +94,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml b/.github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml
index 57d8cf6de4f..8529bb41f22 100644
--- a/.github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml
+++ b/.github/workflows/samples_flows_standard_conditional_flow_for_if_else.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/standard/conditional-flow-for-if-else
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_standard_conditional_flow_for_switch.yml b/.github/workflows/samples_flows_standard_conditional_flow_for_switch.yml
index 448ad1d3905..c8dcefe7b45 100644
--- a/.github/workflows/samples_flows_standard_conditional_flow_for_switch.yml
+++ b/.github/workflows/samples_flows_standard_conditional_flow_for_switch.yml
@@ -48,6 +48,11 @@ jobs:
           sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" .env.example
           mv .env.example .env
         fi
+        if [[ -e ../.env.example ]]; then
+          echo "env replacement"
+          sed -i -e "s/<your_AOAI_key>/$AOAI_API_KEY/g" -e "s/<your_AOAI_endpoint>/$AOAI_API_ENDPOINT/g" ../.env.example
+          mv ../.env.example ../.env
+        fi
     - name: Create run.yml
       working-directory: examples/flows/standard/conditional-flow-for-switch
       run: |
@@ -74,6 +79,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }}
@@ -84,6 +91,8 @@ jobs:
       run: |
         export aoai_api_key=${{ secrets.AOAI_API_KEY_TEST }}
         export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }}
+        export AZURE_OPENAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }}
+        export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }}
         export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }}
         export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }}
         export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }}
diff --git a/.github/workflows/samples_flows_standard_customer_intent_extraction.yml b/.github/workflows/samples_flows_standard_customer_intent_extraction.yml
index d0131b48383..f5a06a441a3 100644
--- a/.github/workflows/samples_flows_standard_customer_intent_extraction.yml
+++ b/.github/workflows/samples_flows_standard_customer_intent_extraction.yml
jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/flows/standard/customer-intent-extraction run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_flows_standard_flow_with_additional_includes.yml b/.github/workflows/samples_flows_standard_flow_with_additional_includes.yml index 774f075025a..5a49124af91 100644 --- a/.github/workflows/samples_flows_standard_flow_with_additional_includes.yml +++ b/.github/workflows/samples_flows_standard_flow_with_additional_includes.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/flows/standard/flow-with-additional-includes run: | @@ -77,6 +82,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -87,6 +94,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_flows_standard_flow_with_symlinks.yml b/.github/workflows/samples_flows_standard_flow_with_symlinks.yml index 4d0d129148e..04892bd7c0b 100644 --- a/.github/workflows/samples_flows_standard_flow_with_symlinks.yml +++ b/.github/workflows/samples_flows_standard_flow_with_symlinks.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e 
"s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/flows/standard/flow-with-symlinks run: | @@ -77,6 +82,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -87,6 +94,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_flows_standard_gen_docstring.yml b/.github/workflows/samples_flows_standard_gen_docstring.yml index d6dc3640599..1e16c47c11a 100644 --- a/.github/workflows/samples_flows_standard_gen_docstring.yml +++ b/.github/workflows/samples_flows_standard_gen_docstring.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/flows/standard/gen-docstring run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_flows_standard_maths_to_code.yml b/.github/workflows/samples_flows_standard_maths_to_code.yml index e87c8d1eb30..28c2d995c08 100644 --- a/.github/workflows/samples_flows_standard_maths_to_code.yml +++ b/.github/workflows/samples_flows_standard_maths_to_code.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/flows/standard/maths-to-code run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST 
}} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_flows_standard_named_entity_recognition.yml b/.github/workflows/samples_flows_standard_named_entity_recognition.yml index 1f469a0e73d..367c0a077b9 100644 --- a/.github/workflows/samples_flows_standard_named_entity_recognition.yml +++ b/.github/workflows/samples_flows_standard_named_entity_recognition.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/flows/standard/named-entity-recognition run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_flows_standard_web_classification.yml b/.github/workflows/samples_flows_standard_web_classification.yml index 903d77fc909..316be1a7314 100644 --- a/.github/workflows/samples_flows_standard_web_classification.yml +++ b/.github/workflows/samples_flows_standard_web_classification.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/flows/standard/web-classification run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ 
secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_getstarted_quickstart.yml b/.github/workflows/samples_getstarted_quickstart.yml index 0aeaeb39754..5e24857464a 100644 --- a/.github/workflows/samples_getstarted_quickstart.yml +++ b/.github/workflows/samples_getstarted_quickstart.yml @@ -34,6 +34,22 @@ jobs: python -m pip install --upgrade pip pip install -r ${{ github.workspace }}/examples/requirements.txt pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: examples/tutorials/get-started + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create Aoai Connection run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" - name: Test Notebook diff --git a/.github/workflows/samples_prompty_basic.yml b/.github/workflows/samples_prompty_basic.yml new file mode 100644 index 00000000000..f53332b4763 --- /dev/null +++ b/.github/workflows/samples_prompty_basic.yml @@ -0,0 +1,110 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. 
+ +name: samples_prompty_basic +on: + schedule: + - cron: "46 19 * * *" # Every day starting at 3:46 BJT + pull_request: + branches: [ main ] + paths: [ examples/prompty/basic/**, examples/*requirements.txt, .github/workflows/samples_prompty_basic.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_prompty_basic: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + working-directory: examples + run: | + if [[ -e requirements.txt ]]; then + python -m pip install --upgrade pip + pip install -r requirements.txt + fi + - name: Prepare dev requirements + working-directory: examples + run: | + python -m pip install --upgrade pip + pip install -r dev_requirements.txt + - name: Refine .env file + working-directory: examples/prompty/basic + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create run.yml + working-directory: examples/prompty/basic + run: | + gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }} + gpt_base=$(echo ${gpt_base//\//\\/}) + if [[ -e run.yml ]]; then + sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml + fi + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Extract Steps examples/prompty/basic/README.md + working-directory: ${{ github.workspace }} + run: | + python scripts/readme/extract_steps_from_readme.py -f examples/prompty/basic/README.md -o examples/prompty/basic + - name: Cat script + working-directory: examples/prompty/basic + run: | + cat bash_script.sh + - name: Run scripts against canary workspace (scheduled runs only) + if: github.event_name == 'schedule' + working-directory: examples/prompty/basic + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} + bash bash_script.sh + - name: Run scripts against production workspace + if: github.event_name != 'schedule' + working-directory: examples/prompty/basic + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} + bash bash_script.sh + - name: Pip List for Debug + if : ${{ always() }} + working-directory: 
examples/prompty/basic + run: | + pip list + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/prompty/basic/bash_script.sh \ No newline at end of file diff --git a/.github/workflows/samples_prompty_basic_promptyquickstart.yml b/.github/workflows/samples_prompty_basic_promptyquickstart.yml new file mode 100644 index 00000000000..d8b0cfc6930 --- /dev/null +++ b/.github/workflows/samples_prompty_basic_promptyquickstart.yml @@ -0,0 +1,64 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_prompty_basic_promptyquickstart +on: + schedule: + - cron: "19 21 * * *" # Every day starting at 5:19 BJT + pull_request: + branches: [ main ] + paths: [ examples/prompty/basic/**, examples/*requirements.txt, .github/workflows/samples_prompty_basic_promptyquickstart.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_prompty_basic_promptyquickstart: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + run: | + python -m pip install --upgrade pip + pip install -r ${{ github.workspace }}/examples/requirements.txt + pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: examples/prompty/basic + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create Aoai Connection + run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" + - name: Test Notebook + working-directory: examples/prompty/basic + run: | + papermill -k python prompty-quickstart.ipynb prompty-quickstart.output.ipynb + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/prompty/basic diff --git a/.github/workflows/samples_prompty_chat_basic.yml b/.github/workflows/samples_prompty_chat_basic.yml new file mode 100644 index 00000000000..3edea145a1b --- /dev/null +++ b/.github/workflows/samples_prompty_chat_basic.yml @@ -0,0 +1,110 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. 
+ +name: samples_prompty_chat_basic +on: + schedule: + - cron: "47 20 * * *" # Every day starting at 4:47 BJT + pull_request: + branches: [ main ] + paths: [ examples/prompty/chat-basic/**, examples/*requirements.txt, .github/workflows/samples_prompty_chat_basic.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_prompty_chat_basic: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + working-directory: examples + run: | + if [[ -e requirements.txt ]]; then + python -m pip install --upgrade pip + pip install -r requirements.txt + fi + - name: Prepare dev requirements + working-directory: examples + run: | + python -m pip install --upgrade pip + pip install -r dev_requirements.txt + - name: Refine .env file + working-directory: examples/prompty/chat-basic + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create run.yml + working-directory: examples/prompty/chat-basic + run: | + gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }} + gpt_base=$(echo ${gpt_base//\//\\/}) + if [[ -e run.yml ]]; then + sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml + fi + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Extract Steps examples/prompty/chat-basic/README.md + working-directory: ${{ github.workspace }} + run: | + python scripts/readme/extract_steps_from_readme.py -f examples/prompty/chat-basic/README.md -o examples/prompty/chat-basic + - name: Cat script + working-directory: examples/prompty/chat-basic + run: | + cat bash_script.sh + - name: Run scripts against canary workspace (scheduled runs only) + if: github.event_name == 'schedule' + working-directory: examples/prompty/chat-basic + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} + bash bash_script.sh + - name: Run scripts against production workspace + if: github.event_name != 'schedule' + working-directory: examples/prompty/chat-basic + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} + bash bash_script.sh + - name: Pip List for Debug + if 
: ${{ always() }} + working-directory: examples/prompty/chat-basic + run: | + pip list + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/prompty/chat-basic/bash_script.sh \ No newline at end of file diff --git a/.github/workflows/samples_prompty_chatbasic_chatwithprompty.yml b/.github/workflows/samples_prompty_chatbasic_chatwithprompty.yml new file mode 100644 index 00000000000..fffc75e2082 --- /dev/null +++ b/.github/workflows/samples_prompty_chatbasic_chatwithprompty.yml @@ -0,0 +1,64 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_prompty_chatbasic_chatwithprompty +on: + schedule: + - cron: "51 19 * * *" # Every day starting at 3:51 BJT + pull_request: + branches: [ main ] + paths: [ examples/prompty/chat-basic/**, examples/*requirements.txt, .github/workflows/samples_prompty_chatbasic_chatwithprompty.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_prompty_chatbasic_chatwithprompty: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + run: | + python -m pip install --upgrade pip + pip install -r ${{ github.workspace }}/examples/requirements.txt + pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: examples/prompty/chat-basic + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create Aoai Connection + run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" + - name: Test Notebook + working-directory: examples/prompty/chat-basic + run: | + papermill -k python chat-with-prompty.ipynb chat-with-prompty.output.ipynb + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/prompty/chat-basic diff --git a/.github/workflows/samples_prompty_eval_apology.yml b/.github/workflows/samples_prompty_eval_apology.yml new file mode 100644 index 00000000000..4c69e09f365 --- /dev/null +++ b/.github/workflows/samples_prompty_eval_apology.yml @@ -0,0 +1,110 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. 
+ +name: samples_prompty_eval_apology +on: + schedule: + - cron: "10 22 * * *" # Every day starting at 6:10 BJT + pull_request: + branches: [ main ] + paths: [ examples/prompty/eval-apology/**, examples/*requirements.txt, .github/workflows/samples_prompty_eval_apology.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_prompty_eval_apology: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + working-directory: examples + run: | + if [[ -e requirements.txt ]]; then + python -m pip install --upgrade pip + pip install -r requirements.txt + fi + - name: Prepare dev requirements + working-directory: examples + run: | + python -m pip install --upgrade pip + pip install -r dev_requirements.txt + - name: Refine .env file + working-directory: examples/prompty/eval-apology + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create run.yml + working-directory: examples/prompty/eval-apology + run: | + gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }} + gpt_base=$(echo ${gpt_base//\//\\/}) + if [[ -e run.yml ]]; then + sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml + fi + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Extract Steps examples/prompty/eval-apology/README.md + working-directory: ${{ github.workspace }} + run: | + python scripts/readme/extract_steps_from_readme.py -f examples/prompty/eval-apology/README.md -o examples/prompty/eval-apology + - name: Cat script + working-directory: examples/prompty/eval-apology + run: | + cat bash_script.sh + - name: Run scripts against canary workspace (scheduled runs only) + if: github.event_name == 'schedule' + working-directory: examples/prompty/eval-apology + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} + bash bash_script.sh + - name: Run scripts against production workspace + if: github.event_name != 'schedule' + working-directory: examples/prompty/eval-apology + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} + bash bash_script.sh + - name: 
Pip List for Debug + if : ${{ always() }} + working-directory: examples/prompty/eval-apology + run: | + pip list + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/prompty/eval-apology/bash_script.sh \ No newline at end of file diff --git a/.github/workflows/samples_prompty_eval_basic.yml b/.github/workflows/samples_prompty_eval_basic.yml new file mode 100644 index 00000000000..e690e054a7c --- /dev/null +++ b/.github/workflows/samples_prompty_eval_basic.yml @@ -0,0 +1,110 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_prompty_eval_basic +on: + schedule: + - cron: "25 21 * * *" # Every day starting at 5:25 BJT + pull_request: + branches: [ main ] + paths: [ examples/prompty/eval-basic/**, examples/*requirements.txt, .github/workflows/samples_prompty_eval_basic.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_prompty_eval_basic: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + working-directory: examples + run: | + if [[ -e requirements.txt ]]; then + python -m pip install --upgrade pip + pip install -r requirements.txt + fi + - name: Prepare dev requirements + working-directory: examples + run: | + python -m pip install --upgrade pip + pip install -r dev_requirements.txt + - name: Refine .env file + working-directory: examples/prompty/eval-basic + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create run.yml + working-directory: examples/prompty/eval-basic + run: | + gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }} + gpt_base=$(echo ${gpt_base//\//\\/}) + if [[ -e run.yml ]]; then + sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml + fi + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Extract Steps examples/prompty/eval-basic/README.md + working-directory: ${{ github.workspace }} + run: | + python scripts/readme/extract_steps_from_readme.py -f examples/prompty/eval-basic/README.md -o examples/prompty/eval-basic + - name: Cat script + working-directory: examples/prompty/eval-basic + run: | + cat bash_script.sh + - name: Run scripts against canary workspace (scheduled runs only) + if: github.event_name == 'schedule' + working-directory: examples/prompty/eval-basic + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export 
test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} + bash bash_script.sh + - name: Run scripts against production workspace + if: github.event_name != 'schedule' + working-directory: examples/prompty/eval-basic + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} + bash bash_script.sh + - name: Pip List for Debug + if : ${{ always() }} + working-directory: examples/prompty/eval-basic + run: | + pip list + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/prompty/eval-basic/bash_script.sh \ No newline at end of file diff --git a/.github/workflows/samples_runmanagement_runmanagement.yml b/.github/workflows/samples_runmanagement_runmanagement.yml index afa13f6f3aa..6411a96a04a 100644 --- a/.github/workflows/samples_runmanagement_runmanagement.yml +++ b/.github/workflows/samples_runmanagement_runmanagement.yml @@ -34,6 +34,22 @@ jobs: python -m pip install --upgrade pip pip install -r ${{ github.workspace }}/examples/requirements.txt pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: examples/tutorials/run-management + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create Aoai Connection run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" - name: Test Notebook diff --git a/.github/workflows/samples_tools_use_cases_cascading_inputs_tool_showcase.yml b/.github/workflows/samples_tools_use_cases_cascading_inputs_tool_showcase.yml index 7f53b84b926..19ec7e657fe 100644 --- a/.github/workflows/samples_tools_use_cases_cascading_inputs_tool_showcase.yml +++ b/.github/workflows/samples_tools_use_cases_cascading_inputs_tool_showcase.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tools/use-cases/cascading-inputs-tool-showcase run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ 
secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tools_use_cases_custom_llm_tool_showcase.yml b/.github/workflows/samples_tools_use_cases_custom_llm_tool_showcase.yml index 01ba24cbe9f..a180436d95b 100644 --- a/.github/workflows/samples_tools_use_cases_custom_llm_tool_showcase.yml +++ b/.github/workflows/samples_tools_use_cases_custom_llm_tool_showcase.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tools/use-cases/custom_llm_tool_showcase run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_package_tool_showcase.yml b/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_package_tool_showcase.yml index 9705846f644..fa2381b5d86 100644 --- a/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_package_tool_showcase.yml +++ b/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_package_tool_showcase.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tools/use-cases/custom-strong-type-connection-package-tool-showcase run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export 
test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_script_tool_showcase.yml b/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_script_tool_showcase.yml index 307db728697..86a851067e5 100644 --- a/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_script_tool_showcase.yml +++ b/.github/workflows/samples_tools_use_cases_custom_strong_type_connection_script_tool_showcase.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tools/use-cases/custom-strong-type-connection-script-tool-showcase run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tools_use_cases_dynamic_list_input_tool_showcase.yml b/.github/workflows/samples_tools_use_cases_dynamic_list_input_tool_showcase.yml index ecd70f2dc9f..e1634874782 100644 --- a/.github/workflows/samples_tools_use_cases_dynamic_list_input_tool_showcase.yml +++ b/.github/workflows/samples_tools_use_cases_dynamic_list_input_tool_showcase.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tools/use-cases/dynamic-list-input-tool-showcase run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export 
test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml b/.github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml new file mode 100644 index 00000000000..881698533d2 --- /dev/null +++ b/.github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml @@ -0,0 +1,69 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_tracing_autogengroupchat_traceautogengroupchat +on: + schedule: + - cron: "11 20 * * *" # Every day starting at 4:11 BJT + pull_request: + branches: [ main ] + paths: [ examples/tutorials/tracing/autogen-groupchat/**, .github/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml, examples/requirements.txt, examples/connections/azure_openai.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_tracing_autogengroupchat_traceautogengroupchat: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + run: | + python -m pip install --upgrade pip + pip install -r ${{ github.workspace }}/examples/requirements.txt + pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: examples/tutorials/tracing/autogen-groupchat + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + if [[ -e OAI_CONFIG_LIST.json.example ]]; then + echo "OAI_CONFIG_LIST replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" OAI_CONFIG_LIST.json.example + mv OAI_CONFIG_LIST.json.example OAI_CONFIG_LIST.json + fi + - name: Create Aoai Connection + run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" + - name: Test Notebook + working-directory: examples/tutorials/tracing/autogen-groupchat + run: | + papermill -k python trace-autogen-groupchat.ipynb trace-autogen-groupchat.output.ipynb + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/tutorials/tracing/autogen-groupchat diff --git 
a/.github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml b/.github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml new file mode 100644 index 00000000000..faa074d98ea --- /dev/null +++ b/.github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml @@ -0,0 +1,64 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_tracing_customotlpcollector_otlptracecollector +on: + schedule: + - cron: "22 21 * * *" # Every day starting at 5:22 BJT + pull_request: + branches: [ main ] + paths: [ examples/tutorials/tracing/custom-otlp-collector/**, .github/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml, examples/requirements.txt, examples/connections/azure_openai.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_tracing_customotlpcollector_otlptracecollector: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + run: | + python -m pip install --upgrade pip + pip install -r ${{ github.workspace }}/examples/requirements.txt + pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: examples/tutorials/tracing/custom-otlp-collector + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create Aoai Connection + run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" + - name: Test Notebook + working-directory: examples/tutorials/tracing/custom-otlp-collector + run: | + papermill -k python otlp-trace-collector.ipynb otlp-trace-collector.output.ipynb + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/tutorials/tracing/custom-otlp-collector diff --git a/.github/workflows/samples_tracing_langchain_tracelangchain.yml b/.github/workflows/samples_tracing_langchain_tracelangchain.yml new file mode 100644 index 00000000000..e3c910edd89 --- /dev/null +++ b/.github/workflows/samples_tracing_langchain_tracelangchain.yml @@ -0,0 +1,64 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. 
+ +name: samples_tracing_langchain_tracelangchain +on: + schedule: + - cron: "21 19 * * *" # Every day starting at 3:21 BJT + pull_request: + branches: [ main ] + paths: [ examples/tutorials/tracing/langchain/**, .github/workflows/samples_tracing_langchain_tracelangchain.yml, examples/requirements.txt, examples/connections/azure_openai.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_tracing_langchain_tracelangchain: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + run: | + python -m pip install --upgrade pip + pip install -r ${{ github.workspace }}/examples/requirements.txt + pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: examples/tutorials/tracing/langchain + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create Aoai Connection + run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" + - name: Test Notebook + working-directory: examples/tutorials/tracing/langchain + run: | + papermill -k python trace-langchain.ipynb trace-langchain.output.ipynb + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/tutorials/tracing/langchain diff --git a/.github/workflows/samples_tutorials_e2e_development_chat_with_pdf.yml b/.github/workflows/samples_tutorials_e2e_development_chat_with_pdf.yml index d078e8bab60..c2b8df00363 100644 --- a/.github/workflows/samples_tutorials_e2e_development_chat_with_pdf.yml +++ b/.github/workflows/samples_tutorials_e2e_development_chat_with_pdf.yml @@ -54,6 +54,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create AOAI Connection from ENV file working-directory: examples/tutorials/e2e-development run: | @@ -90,6 +95,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -100,6 +107,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ 
secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tutorials_flow_deploy_azure_app_service.yml b/.github/workflows/samples_tutorials_flow_deploy_azure_app_service.yml index a353e7682a0..aa98df76f43 100644 --- a/.github/workflows/samples_tutorials_flow_deploy_azure_app_service.yml +++ b/.github/workflows/samples_tutorials_flow_deploy_azure_app_service.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tutorials/flow-deploy/azure-app-service run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tutorials_flow_deploy_create_service_with_flow.yml b/.github/workflows/samples_tutorials_flow_deploy_create_service_with_flow.yml index 4976b5884f0..dfcf3338a0d 100644 --- a/.github/workflows/samples_tutorials_flow_deploy_create_service_with_flow.yml +++ b/.github/workflows/samples_tutorials_flow_deploy_create_service_with_flow.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tutorials/flow-deploy/create-service-with-flow run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export 
test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tutorials_flow_deploy_distribute_flow_as_executable_app.yml b/.github/workflows/samples_tutorials_flow_deploy_distribute_flow_as_executable_app.yml index c72fd439c44..f78641fa64a 100644 --- a/.github/workflows/samples_tutorials_flow_deploy_distribute_flow_as_executable_app.yml +++ b/.github/workflows/samples_tutorials_flow_deploy_distribute_flow_as_executable_app.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tutorials/flow-deploy/distribute-flow-as-executable-app run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tutorials_flow_deploy_docker.yml b/.github/workflows/samples_tutorials_flow_deploy_docker.yml index fa503ff22fc..cdb7a7d6aff 100644 --- a/.github/workflows/samples_tutorials_flow_deploy_docker.yml +++ b/.github/workflows/samples_tutorials_flow_deploy_docker.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tutorials/flow-deploy/docker run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tutorials_flow_deploy_kubernetes.yml 
b/.github/workflows/samples_tutorials_flow_deploy_kubernetes.yml index 48c506b7b18..c92ac56861a 100644 --- a/.github/workflows/samples_tutorials_flow_deploy_kubernetes.yml +++ b/.github/workflows/samples_tutorials_flow_deploy_kubernetes.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tutorials/flow-deploy/kubernetes run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tutorials_flow_fine_tuning_evaluation_promptflow_quality_improvement.yml b/.github/workflows/samples_tutorials_flow_fine_tuning_evaluation_promptflow_quality_improvement.yml index 125ef7799d2..a57c4a2bb3a 100644 --- a/.github/workflows/samples_tutorials_flow_fine_tuning_evaluation_promptflow_quality_improvement.yml +++ b/.github/workflows/samples_tutorials_flow_fine_tuning_evaluation_promptflow_quality_improvement.yml @@ -48,6 +48,11 @@ jobs: sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create run.yml working-directory: examples/tutorials/flow-fine-tuning-evaluation run: | @@ -74,6 +79,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -84,6 +91,8 @@ jobs: run: | export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/.github/workflows/samples_tutorials_tracing.yml b/.github/workflows/samples_tutorials_tracing.yml new file mode 100644 index 00000000000..279495b93c4 --- /dev/null +++ 
b/.github/workflows/samples_tutorials_tracing.yml @@ -0,0 +1,110 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_tutorials_tracing +on: + schedule: + - cron: "18 22 * * *" # Every day starting at 6:18 BJT + pull_request: + branches: [ main ] + paths: [ examples/tutorials/tracing/**, examples/tutorials/tracing//**, .github/workflows/samples_tutorials_tracing.yml, examples/requirements.txt, examples/connections/azure_openai.yml ] + workflow_dispatch: + +env: + IS_IN_CI_PIPELINE: "true" + +jobs: + samples_tutorials_tracing: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + working-directory: examples + run: | + if [[ -e requirements.txt ]]; then + python -m pip install --upgrade pip + pip install -r requirements.txt + fi + - name: Prepare dev requirements + working-directory: examples + run: | + python -m pip install --upgrade pip + pip install -r dev_requirements.txt + - name: Refine .env file + working-directory: examples/tutorials/tracing + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + - name: Create run.yml + working-directory: examples/tutorials/tracing + run: | + gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }} + gpt_base=$(echo ${gpt_base//\//\\/}) + if [[ -e run.yml ]]; then + sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml + fi + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Extract Steps examples/tutorials/tracing/README.md + working-directory: ${{ github.workspace }} + run: | + python scripts/readme/extract_steps_from_readme.py -f examples/tutorials/tracing/README.md -o examples/tutorials/tracing + - name: Cat script + working-directory: examples/tutorials/tracing + run: | + cat bash_script.sh + - name: Run scripts against canary workspace (scheduled runs only) + if: github.event_name == 'schedule' + working-directory: examples/tutorials/tracing + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} + bash bash_script.sh + - name: Run scripts against production workspace + if: github.event_name != 'schedule' + working-directory: examples/tutorials/tracing + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export 
AZURE_OPENAI_API_KEY=${{secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_PROD }} + bash bash_script.sh + - name: Pip List for Debug + if : ${{ always() }} + working-directory: examples/tutorials/tracing + run: | + pip list + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/tutorials/tracing/bash_script.sh \ No newline at end of file diff --git a/docs/concepts/concept-flows.md b/docs/concepts/concept-flows.md index 1c0c85d1112..15e37906295 100644 --- a/docs/concepts/concept-flows.md +++ b/docs/concepts/concept-flows.md @@ -1,27 +1,43 @@ -While how LLMs work may be elusive to many developers, how LLM apps work is not - they essentially involve a series of calls to external services such as LLMs/databases/search engines, or intermediate data processing, all glued together. Thus LLM apps are merely Directed Acyclic Graphs (DAGs) of function calls. These DAGs are flows in prompt flow. +While how LLMs work may be elusive to many developers, how LLM apps work is not - they essentially involve a series of calls to external services such as LLMs/databases/search engines, or intermediate data processing, all glued together. # Flows +## Flex flow + +You can create LLM apps using a Python function or class as the entry point, which encapsulates your app logic. You can directly test or run these with a pure-code experience. Or you can define a `flow.flex.yaml` that points to these entries, which enables testing, running, or viewing traces via the [Promptflow VS Code Extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow). + +Our [examples](https://github.com/microsoft/promptflow/tree/main/examples/flex-flows) should also give you an idea of how to write `flex flows`. + +## DAG flow + +LLM apps can also be defined as Directed Acyclic Graphs (DAGs) of function calls. These DAGs are flows in prompt flow. + A flow in prompt flow is a DAG of functions (we call them [tools](./concept-tools.md)). These functions/tools are connected via input/output dependencies and executed based on the topology by the prompt flow executor. -A flow is represented as a YAML file and can be visualized with our [Prompt flow for VS Code extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow). Here is an example: +A flow is represented as a YAML file and can be visualized with our [Prompt flow for VS Code extension](https://marketplace.visualstudio.com/items?itemName=prompt-flow.prompt-flow). Here is an example `flow.dag.yaml`: ![flow_dag](../media/how-to-guides/quick-start/flow_dag.png) +Please refer to our [examples](https://github.com/microsoft/promptflow/tree/main/examples/flows) to learn how to write a `DAG flow`. + ## Flow types -Prompt flow has three flow types: +Prompt flow examples organize flows by three categories: -- **Standard flow** and **Chat flow**: these two are for you to develop your LLM application. The primary difference between the two lies in the additional support provided by the "Chat Flow" for chat applications. For instance, you can define chat_history, chat_input, and chat_output for your flow. The prompt flow, in turn, will offer a chat-like experience (including conversation history) during the development of the flow. 
Moreover, it also provides a sample chat application for deployment purposes. +- **Standard flow** or **Chat flow**: these two are for you to develop your LLM application. The primary difference between the two lies in the additional support provided by the "Chat Flow" for chat applications. For instance, you can define `chat_history`, `chat_input`, and `chat_output` for your flow. The prompt flow, in turn, will offer a chat-like experience (including conversation history) during the development of the flow. Moreover, it also provides a sample chat application for deployment purposes. - **Evaluation flow** is for you to test/evaluate the quality of your LLM application (standard/chat flow). It usually runs on the outputs of a standard/chat flow and computes metrics that can be used to determine whether the standard/chat flow performs well. E.g., is the answer accurate? Is the answer fact-based? -## When to use standard flow vs. chat flow? -As a general guideline, if you are building a chatbot that needs to maintain conversation history, try chat flow. In most other cases, standard flow should serve your needs. +Flex flow [examples](https://github.com/microsoft/promptflow/tree/main/examples/flex-flows): +- [basic](https://github.com/microsoft/promptflow/tree/main/examples/flex-flows/basic) +- [chat-basic](https://github.com/microsoft/promptflow/tree/main/examples/flex-flows/chat-basic) +- [eval-basic](https://github.com/microsoft/promptflow/tree/main/examples/flex-flows/eval-basic) + +DAG flow [examples](https://github.com/microsoft/promptflow/tree/main/examples/flows): +- [basic](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard/basic) +- [chat-basic](https://github.com/microsoft/promptflow/tree/main/examples/flows/chat/chat-basic) +- [eval-basic](https://github.com/microsoft/promptflow/tree/main/examples/flows/evaluation/eval-basic) -Our examples should also give you an idea when to use what: -- [examples/flows/standard](https://github.com/microsoft/promptflow/tree/main/examples/flows/standard) -- [examples/flows/chat](https://github.com/microsoft/promptflow/tree/main/examples/flows/chat) ## Next steps diff --git a/docs/how-to-guides/index.md b/docs/how-to-guides/index.md index c4481cf628d..821b9a0a860 100644 --- a/docs/how-to-guides/index.md +++ b/docs/how-to-guides/index.md @@ -2,6 +2,12 @@ Simple and short articles grouped by topics, each introduces a core feature of prompt flow and how you can use it to address your specific use cases. +```{toctree} +:caption: Tracing +:maxdepth: 1 +tracing/index +``` + ```{toctree} :caption: Flow :maxdepth: 1 diff --git a/docs/how-to-guides/tracing/index.md b/docs/how-to-guides/tracing/index.md new file mode 100644 index 00000000000..f1056ffed38 --- /dev/null +++ b/docs/how-to-guides/tracing/index.md @@ -0,0 +1,118 @@ +# Tracing + +:::{admonition} Experimental feature +This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental). +::: + +Prompt flow provides the trace feature to capture and visualize the internal execution details for all flows. + +For `DAG flow`, users can track and visualize node-level inputs/outputs of flow execution, which provides critical insights for developers to understand the internal details of execution. 
+ +For `Flex flow` developers, who might use different frameworks (LangChain, Semantic Kernel, OpenAI, various kinds of agents) to create LLM-based applications, prompt flow allows users to instrument their code in an [OpenTelemetry](https://opentelemetry.io/)-compatible way, and visualize it using the UI provided by the promptflow devkit. + +## Instrumenting user's code + +### **`start_trace()` to enable trace for LLM calls** +Let's start with the simplest example: add a single line of code to enable tracing for the LLM calls in your application. +```python +from openai import OpenAI +from promptflow.tracing import start_trace + +# start_trace() will print a url for trace detail visualization +start_trace() + +client = OpenAI() + +completion = client.chat.completions.create( + model="gpt-4", + messages=[ + {"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."}, + {"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."} + ] +) + +print(completion.choices[0].message) +``` + +Running the above Python script will produce example output like the following: +``` +Prompt flow service has started... +You can view the traces from local: http://localhost:/v1.0/ui/traces/?#collection=basic +``` + +Click the trace URL and you will see a trace list with one entry corresponding to each LLM call: +![LLM-trace-list](../../media/trace/LLM-trace-list.png) + + +Click on a record and the LLM details will be displayed in a chat-window experience, together with the other LLM call parameters: +![LLM-trace-detail](../../media/trace/LLM-trace-detail.png) + +Promptflow tracing also works for frameworks like `autogen` and `langchain`: + +1. Example: **[Add trace for Autogen](https://github.com/microsoft/promptflow/tree/main/examples/tutorials/tracing/autogen-groupchat/)** + +![autogen-trace-detail](../../media/trace/autogen-trace-detail.png) + +2. Example: **[Add trace for Langchain](https://github.com/microsoft/promptflow/tree/main/examples/tutorials/tracing/langchain)** + +![langchain-trace-detail](../../media/trace/langchain-trace-detail.png) + +### **`@trace` to trace any function** +A more common scenario is an application with a complicated code structure, where the developer would like to add traces on the critical paths they need to debug and monitor. + +See the **[math_to_code](https://github.com/microsoft/promptflow/tree/main/examples/tutorials/tracing/math_to_code.py)** example. Executing the command below will print a URL that displays the trace records and the trace details of each test. + +```python +import os + +from openai import AzureOpenAI + +from promptflow.tracing import trace + +# excerpt from math_to_code.py; code_refine is defined elsewhere in that file +# trace your function +@trace +def code_gen(client: AzureOpenAI, question: str) -> str: + sys_prompt = ( + "I want you to act as a Math expert specializing in Algebra, Geometry, and Calculus. " + "Given the question, develop python code to model the user's question. " + "Make sure only reply the executable code, no other words." + ) + completion = client.chat.completions.create( + model=os.getenv("CHAT_DEPLOYMENT_NAME", "gpt-35-turbo"), + messages=[ + { + "role": "system", + "content": sys_prompt, + }, + {"role": "user", "content": question}, + ], + ) + raw_code = completion.choices[0].message.content + result = code_refine(raw_code) + return result +``` + +```shell +python math_to_code.py +``` + +## Trace visualization in flow test and batch run + +### Flow test +If your application is created as a DAG flow, all flow tests and batch runs will automatically enable the trace function. 
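+ +Flows get this behavior for free; for a plain script you would combine the two APIs from the previous sections yourself. Here is a minimal self-contained sketch (the functions are illustrative stubs, not part of any promptflow example): + +```python +from promptflow.tracing import start_trace, trace + + +@trace +def make_prompt(question: str) -> str: +    # nested traced calls show up as child spans in the trace tree +    return f"Answer concisely: {question}" + + +@trace +def answer(question: str) -> str: +    prompt = make_prompt(question) +    # a real app would send the prompt to an LLM here; we return a stub +    return f"(stubbed response for) {prompt}" + + +if __name__ == "__main__": +    start_trace()  # prints the local trace UI url +    print(answer("What is recursion?")) +``` + 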
Take the **[chat_with_pdf](https://github.com/microsoft/promptflow/tree/main/examples/flows/chat/chat-with-pdf/)** flow as an example. + +Run `pf flow test --flow .`; each flow test will generate a single line in the trace UI: +![flow-trace-record](../../media/trace/flow-trace-records.png) + +Click a record and the trace details will be visualized as a tree view. + +![flow-trace-detail](../../media/trace/flow-trace-detail.png) + +### Evaluate against batch data +Keeping **[chat_with_pdf](https://github.com/microsoft/promptflow/tree/main/examples/flows/chat/chat-with-pdf)** as the example, you can use the commands below to trigger a batch run: + +```shell +pf run create -f batch_run.yaml +``` +Or +```shell +pf run create --flow . --data "./data/bert-paper-qna.jsonl" --column-mapping chat_history='${data.chat_history}' pdf_url='${data.pdf_url}' question='${data.question}' +``` +Then you will get a run-related trace URL, e.g. http://localhost:/v1.0/ui/traces?run=chat_with_pdf_20240226_181222_219335 + +![batch_run_record](../../media/trace/batch_run_record.png) \ No newline at end of file diff --git a/docs/media/trace/LLM-trace-detail.png b/docs/media/trace/LLM-trace-detail.png new file mode 100644 index 00000000000..758bc7331f6 Binary files /dev/null and b/docs/media/trace/LLM-trace-detail.png differ diff --git a/docs/media/trace/LLM-trace-list.png b/docs/media/trace/LLM-trace-list.png new file mode 100644 index 00000000000..4049106d849 Binary files /dev/null and b/docs/media/trace/LLM-trace-list.png differ diff --git a/docs/media/trace/at-trace-detail.png b/docs/media/trace/at-trace-detail.png new file mode 100644 index 00000000000..d2155ac181e Binary files /dev/null and b/docs/media/trace/at-trace-detail.png differ diff --git a/docs/media/trace/autogen-trace-detail.png b/docs/media/trace/autogen-trace-detail.png new file mode 100644 index 00000000000..49a7783a25a Binary files /dev/null and b/docs/media/trace/autogen-trace-detail.png differ diff --git a/docs/media/trace/batch_run_record.png b/docs/media/trace/batch_run_record.png new file mode 100644 index 00000000000..df18d4e6a58 Binary files /dev/null and b/docs/media/trace/batch_run_record.png differ diff --git a/docs/media/trace/flow-trace-detail.png b/docs/media/trace/flow-trace-detail.png new file mode 100644 index 00000000000..3319e37619b Binary files /dev/null and b/docs/media/trace/flow-trace-detail.png differ diff --git a/docs/media/trace/flow-trace-records.png b/docs/media/trace/flow-trace-records.png new file mode 100644 index 00000000000..7b2e4480972 Binary files /dev/null and b/docs/media/trace/flow-trace-records.png differ diff --git a/docs/media/trace/langchain-trace-detail.png b/docs/media/trace/langchain-trace-detail.png new file mode 100644 index 00000000000..01590353ade Binary files /dev/null and b/docs/media/trace/langchain-trace-detail.png differ diff --git a/examples/README.md b/examples/README.md index 9ce7e31ca3b..f4846fcc2f7 100644 --- a/examples/README.md +++ b/examples/README.md @@ -33,6 +33,27 @@ | [docker](tutorials/flow-deploy/docker/README.md) | [![samples_tutorials_flow_deploy_docker](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_flow_deploy_docker.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_flow_deploy_docker.yml) | This example demos how to deploy flow as a docker app | | [kubernetes](tutorials/flow-deploy/kubernetes/README.md) | 
[![samples_tutorials_flow_deploy_kubernetes](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_flow_deploy_kubernetes.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_flow_deploy_kubernetes.yml) | This example demos how to deploy flow as a Kubernetes app | | [promptflow-quality-improvement](tutorials/flow-fine-tuning-evaluation/promptflow-quality-improvement.md) | [![samples_tutorials_flow_fine_tuning_evaluation_promptflow_quality_improvement](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_flow_fine_tuning_evaluation_promptflow_quality_improvement.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_flow_fine_tuning_evaluation_promptflow_quality_improvement.yml) | This tutorial is designed to enhance your understanding of improving flow quality through prompt tuning and evaluation | +| [tracing](tutorials/tracing/README.md) | [![samples_tutorials_tracing](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_tracing.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_tutorials_tracing.yml) | Prompt flow provides the tracing feature to capture and visualize the internal execution details for all flows | + + +### Prompty ([prompty](prompty)) + +| path | status | description | +------|--------|------------- +| [basic](prompty/basic/README.md) | [![samples_prompty_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic.yml) | A basic prompt that uses the chat API to answer questions, with connection configured using environment variables | +| [chat-basic](prompty/chat-basic/README.md) | [![samples_prompty_chat_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chat_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chat_basic.yml) | A prompt that uses the chat API to answer questions with chat history, leveraging promptflow connection | +| [eval-apology](prompty/eval-apology/README.md) | [![samples_prompty_eval_apology](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_apology.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_apology.yml) | A prompt that determines whether a chat conversation contains an apology from the assistant | +| [eval-basic](prompty/eval-basic/README.md) | [![samples_prompty_eval_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_basic.yml) | Basic evaluator prompt for QA scenario | + + +### Flex Flows ([flex-flows](flex-flows)) + +| path | status | description | +------|--------|------------- +| [basic](flex-flows/basic/README.md) | [![samples_flex_flows_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_basic.yml) | A basic standard flow defined using function entry that calls Azure OpenAI with connection info stored in environment variables | +| [chat-basic](flex-flows/chat-basic/README.md) | 
[![samples_flex_flows_chat_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_chat_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_chat_basic.yml) | A basic chat flow defined using class entry | +| [eval-checklist](flex-flows/eval-checklist/README.md) | [![samples_flex_flows_eval_checklist](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_checklist.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_checklist.yml) | An example flow defined using class entry which demos how to evaluate whether the answer passes a user-specified checklist | +| [eval-code-quality](flex-flows/eval-code-quality/README.md) | [![samples_flex_flows_eval_code_quality](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_code_quality.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_code_quality.yml) | An example flow defined using function entry which shows how to evaluate the quality of a code snippet | ### Flows ([flows](flows)) @@ -113,7 +134,14 @@ | [pipeline.ipynb](tutorials/run-flow-with-pipeline/pipeline.ipynb) | [![samples_runflowwithpipeline_pipeline](https://github.com/microsoft/promptflow/actions/workflows/samples_runflowwithpipeline_pipeline.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_runflowwithpipeline_pipeline.yml) | Create pipeline using components to run a distributed job with tensorflow | | [cloud-run-management.ipynb](tutorials/run-management/cloud-run-management.ipynb) | [![samples_runmanagement_cloudrunmanagement](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_cloudrunmanagement.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_cloudrunmanagement.yml) | Flow run management in Azure AI | | [run-management.ipynb](tutorials/run-management/run-management.ipynb) | [![samples_runmanagement_runmanagement](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_runmanagement.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_runmanagement.yml) | Flow run management | +| [trace-autogen-groupchat.ipynb](tutorials/tracing/autogen-groupchat/trace-autogen-groupchat.ipynb) | [![samples_tracing_autogengroupchat_traceautogengroupchat](https://github.com/microsoft/promptflow/actions/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_tracing_autogengroupchat_traceautogengroupchat.yml) | Tracing LLM calls in autogen group chat application | +| [otlp-trace-collector.ipynb](tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb) | [![samples_tracing_customotlpcollector_otlptracecollector](https://github.com/microsoft/promptflow/actions/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_tracing_customotlpcollector_otlptracecollector.yml) | A tutorial on how to leverage a custom OTLP collector. 
| +| [trace-langchain.ipynb](tutorials/tracing/langchain/trace-langchain.ipynb) | [![samples_tracing_langchain_tracelangchain](https://github.com/microsoft/promptflow/actions/workflows/samples_tracing_langchain_tracelangchain.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_tracing_langchain_tracelangchain.yml) | Tracing LLM calls in langchain application | | [connection.ipynb](connections/connection.ipynb) | [![samples_connections_connection](https://github.com/microsoft/promptflow/actions/workflows/samples_connections_connection.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_connections_connection.yml) | Manage various types of connections using sdk | +| [flex-flow-quickstart-azure.ipynb](flex-flows/basic/flex-flow-quickstart-azure.ipynb) | [![samples_flexflows_basic_flexflowquickstartazure](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstartazure.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstartazure.yml) | A quickstart tutorial to run a flex flow and evaluate it in azure. | +| [flex-flow-quickstart.ipynb](flex-flows/basic/flex-flow-quickstart.ipynb) | [![samples_flexflows_basic_flexflowquickstart](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstart.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstart.yml) | A quickstart tutorial to run a flex flow and evaluate it. | +| [prompty-quickstart.ipynb](prompty/basic/prompty-quickstart.ipynb) | [![samples_prompty_basic_promptyquickstart](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic_promptyquickstart.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic_promptyquickstart.yml) | A quickstart tutorial to run a prompty and evaluate it. | +| [chat-with-prompty.ipynb](prompty/chat-basic/chat-with-prompty.ipynb) | [![samples_prompty_chatbasic_chatwithprompty](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chatbasic_chatwithprompty.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chatbasic_chatwithprompty.yml) | A quickstart tutorial to run a chat prompty and evaluate it. 
| | [chat-with-pdf-azure.ipynb](flows/chat/chat-with-pdf/chat-with-pdf-azure.ipynb) | [![samples_flows_chat_chatwithpdf_chatwithpdfazure](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdfazure.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdfazure.yml) | A tutorial of chat-with-pdf flow that executes in Azure AI | | [chat-with-pdf.ipynb](flows/chat/chat-with-pdf/chat-with-pdf.ipynb) | [![samples_flows_chat_chatwithpdf_chatwithpdf](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdf.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdf.yml) | A tutorial of chat-with-pdf flow that allows users to ask questions about the content of a PDF file and get answers | diff --git a/examples/flex-flows/.env.example b/examples/flex-flows/.env.example new file mode 100644 index 00000000000..4083fa3c5ad --- /dev/null +++ b/examples/flex-flows/.env.example @@ -0,0 +1,2 @@ +AZURE_OPENAI_API_KEY= +AZURE_OPENAI_ENDPOINT= diff --git a/examples/flex-flows/README.md b/examples/flex-flows/README.md new file mode 100644 index 00000000000..797211015ad --- /dev/null +++ b/examples/flex-flows/README.md @@ -0,0 +1,18 @@ +# Flex Flow + +You can learn more about flex flow with the examples in this folder. + +## SDK examples + +| path | status | description | +------|--------|------------- +| [flex-flow-quickstart.ipynb](./basic/flex-flow-quickstart.ipynb) | [![samples_flexflows_basic_flexflowquickstart](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstart.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstart.yml) | A quickstart tutorial to run a flex flow and evaluate it. | +| [flex-flow-quickstart-azure.ipynb](./basic/flex-flow-quickstart-azure.ipynb) | [![samples_flexflows_basic_flexflowquickstartazure](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstartazure.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flexflows_basic_flexflowquickstartazure.yml) | A quickstart tutorial to run a flex flow and evaluate it in azure. 
| + +## CLI examples +| path | status | description | +------|--------|------------- +| [basic](./basic/README.md) | [![samples_flex_flows_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_basic.yml) | A basic standard flow defined using function entry that calls Azure OpenAI with connection info stored in environment variables | +| [chat-basic](./chat-basic/README.md) | [![samples_flex_flows_chat_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_chat_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_chat_basic.yml) | A basic chat flow defined using class entry | +| [eval-checklist](./eval-checklist/README.md) | [![samples_flex_flows_eval_checklist](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_checklist.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_checklist.yml) | An example flow defined using class entry which demos how to evaluate whether the answer passes a user-specified checklist | +| [eval-code-quality](./eval-code-quality/README.md) | [![samples_flex_flows_eval_code_quality](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_code_quality.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flex_flows_eval_code_quality.yml) | An example flow defined using function entry which shows how to evaluate the quality of a code snippet | diff --git a/examples/flex-flows/basic/README.md b/examples/flex-flows/basic/README.md new file mode 100644 index 00000000000..7d8bca0847c --- /dev/null +++ b/examples/flex-flows/basic/README.md @@ -0,0 +1,110 @@ +# Basic standard flow +A basic standard flow defined using function entry that calls Azure OpenAI with connection info stored in environment variables. + +## Prerequisites + +Install the promptflow sdk and other dependencies: +```bash +pip install -r requirements.txt +``` + +## Run flow + +- Prepare your Azure OpenAI resource following these [instructions](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one. + +- Set up environment variables + +Ensure you have put your Azure OpenAI endpoint and key in the [.env](../.env) file. You can create one by referring to this [example file](../.env.example). + +```bash +cat ../.env +``` + +- Run/debug as a normal Python file +```bash +python programmer.py +``` + +- Test flow with connection + +Storing connection info in `.env` as plaintext is not safe. We recommend using `pf connection` to guard secrets like `api_key` from leaking. + +- Show or create `open_ai_connection` +```bash +# create connection from `azure_openai.yml` file +# Override keys with --set to avoid yaml file changes +pf connection create --file ../../connections/azure_openai.yml --set api_key= api_base= + +# check if connection exists +pf connection show -n open_ai_connection +``` + +```bash +# test with default input value in flow.flex.yaml +pf flow test --flow . + +# test with flow inputs +pf flow test --flow . --inputs text="Java Hello World!" + +``` + +- Create a run with multiple lines of data +```bash +# using environment variables from the .env file (loaded in user code: llm.py) +pf run create --flow . --data ./data.jsonl --column-mapping text='${data.text}' --stream ```
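+ +Since the columns in [data.jsonl](data.jsonl) already match the flow's input names, the mapping can also be omitted here (a sketch of the shorter form): +```bash +# column-mapping may be skipped when data column names match the flow inputs +pf run create --flow . --data ./data.jsonl --stream +```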
+ +As shown in the sketch above, you can skip providing `column-mapping` when the provided data has the same column names as the flow inputs. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI. + +- List and show run meta +```bash +# list created run +pf run list + +# get a sample run name + +name=$(pf run list -r 10 | jq '.[] | select(.name | contains("basic_")) | .name'| head -n 1 | tr -d '"') +# show specific run detail +pf run show --name $name + +# show output +pf run show-details --name $name + +# visualize run in browser +pf run visualize --name $name +``` + +## Run flow in cloud with connection +- Assume we already have a connection named `open_ai_connection` in the workspace. +```bash +# set default workspace +az account set -s +az configure --defaults group= workspace= +``` + +- Create run +```bash +# run with environment variables referencing the connection in the azureml workspace +pfazure run create --flow . --data ./data.jsonl --column-mapping text='${data.text}' --stream +# run using yaml file +pfazure run create --file run.yml --stream +``` + +- List and show run meta +```bash +# list created run +pfazure run list -r 3 + +# get a sample run name +name=$(pfazure run list -r 100 | jq '.[] | select(.name | contains("basic_")) | .name'| head -n 1 | tr -d '"') + +# show specific run detail +pfazure run show --name $name + +# show output +pfazure run show-details --name $name + +# visualize run in browser +pfazure run visualize --name $name +``` diff --git a/examples/flex-flows/basic/data.jsonl b/examples/flex-flows/basic/data.jsonl new file mode 100644 index 00000000000..d71f1ca42a2 --- /dev/null +++ b/examples/flex-flows/basic/data.jsonl @@ -0,0 +1,3 @@ +{"text": "Python Hello World!"} +{"text": "C Hello World!"} +{"text": "C# Hello World!"} diff --git a/examples/flex-flows/basic/flex-flow-quickstart-azure.ipynb b/examples/flex-flows/basic/flex-flow-quickstart-azure.ipynb new file mode 100644 index 00000000000..644cdec9f7a --- /dev/null +++ b/examples/flex-flows/basic/flex-flow-quickstart-azure.ipynb @@ -0,0 +1,263 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Getting started with flex flow in Azure\n", + "\n", + "**Learning Objectives** - Upon completing this tutorial, you should be able to:\n", + "\n", + "- Write an LLM application using a notebook and visualize the trace of your application.\n", + "- Convert the application into a flow and batch run it against multiple lines of data.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. Install dependent packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture --no-stderr\n", + "%pip install -r ./requirements-azure.txt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Connect to workspace" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Configure credential\n", + "\n", + "We are using `DefaultAzureCredential` to get access to the workspace. \n", + "`DefaultAzureCredential` should be capable of handling most Azure SDK authentication scenarios. \n", + "If it does not work for you, reference these for more available credentials: [configure credential example](../../configuration.ipynb), [azure-identity reference doc](https://docs.microsoft.com/en-us/python/api/azure-identity/azure.identity?view=azure-python)."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from azure.identity import DefaultAzureCredential, InteractiveBrowserCredential\n", + "\n", + "try:\n", + " credential = DefaultAzureCredential()\n", + " # Check if given credential can get token successfully.\n", + " credential.get_token(\"https://management.azure.com/.default\")\n", + "except Exception as ex:\n", + " # Fall back to InteractiveBrowserCredential in case DefaultAzureCredential does not work\n", + " credential = InteractiveBrowserCredential()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Get a handle to the workspace\n", + "\n", + "We use a config file to connect to the workspace. The Azure ML workspace should be configured with a compute cluster. [Check this notebook for configuring a workspace](../../configuration.ipynb)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.azure import PFClient\n", + "\n", + "# Get a handle to workspace\n", + "pf = PFClient.from_config(credential=credential)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create necessary connections\n", + "Connection helps securely store and manage secret keys or other sensitive credentials required for interacting with LLMs and other external tools, for example Azure Content Safety.\n", + "\n", + "In this notebook, we will use the `basic` flex flow, which uses the connection `open_ai_connection` inside; we need to set up the connection if we haven't added it before.\n", + "\n", + "Prepare your Azure OpenAI resource following these [instructions](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.\n", + "\n", + "Please go to [workspace portal](https://ml.azure.com/), click `Prompt flow` -> `Connections` -> `Create`, then follow the instructions to create your own connections. \n", + "Learn more about [connections](https://learn.microsoft.com/en-us/azure/machine-learning/prompt-flow/concept-connections?view=azureml-api-2)." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Batch run the function as flow with multi-line data\n", + "\n", + "Create a [flow.flex.yaml](flow.flex.yaml) file to define a flow whose entry points to the Python function we defined.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# show the flow.flex.yaml content\n", + "with open(\"flow.flex.yaml\") as fin:\n", + " print(fin.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Batch run with a data file (with multiple lines of test data)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "flow = \".\" # path to the flow directory\n", + "data = \"./data.jsonl\" # path to the data file\n", + "\n", + "# create run with the flow and data\n", + "base_run = pf.run(\n", + " flow=flow,\n", + " data=data,\n", + " column_mapping={\n", + " \"text\": \"${data.text}\",\n", + " },\n", + " stream=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "details = pf.get_details(base_run)\n", + "details.head(10)" + ] + },
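+ { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# optional sketch: a created run also exposes basic metadata;\n", + "# `name` and `status` are assumed attributes of the run object\n", + "print(base_run.name, base_run.status)" + ] + },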
+ { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Evaluate your flow\n", + "Then you can use an evaluation method to evaluate your flow. The evaluation methods are also flows, which usually use an LLM to assert that the produced output matches certain expectations. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run evaluation on the previous batch run\n", + "The **base_run** is the batch run we completed in step 2 above, for the basic flex flow with \"data.jsonl\" as input." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "eval_flow = \"../eval-code-quality/flow.flex.yaml\"\n", + "\n", + "eval_run = pf.run(\n", + " flow=eval_flow,\n", + " data=\"./data.jsonl\", # path to the data file\n", + " run=base_run, # specify base_run as the run you want to evaluate\n", + " column_mapping={\n", + " \"code\": \"${run.outputs.output}\",\n", + " },\n", + " stream=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "details = pf.get_details(eval_run)\n", + "details.head(10)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "metrics = pf.get_metrics(eval_run)\n", + "print(json.dumps(metrics, indent=4))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pf.visualize([base_run, eval_run])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Next Steps\n", + "\n", + "By now you've successfully run your first prompt flow and even evaluated it. That's great!\n", + "\n", + "You can check out more examples:\n", + "- [Basic Chat](../chat-basic/README.md): demonstrates how to create a chatbot that can remember previous interactions and use the conversation history to generate the next message." + ] + } + ], + "metadata": { + "description": "A quickstart tutorial to run a flex flow and evaluate it in azure.", + "kernelspec": { + "display_name": "prompt_flow", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + }, + "resources": "examples/requirements-azure.txt, examples/flex-flows/basic, examples/flex-flows/eval-code-quality" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/flex-flows/basic/flex-flow-quickstart.ipynb b/examples/flex-flows/basic/flex-flow-quickstart.ipynb new file mode 100644 index 00000000000..232622c48cc --- /dev/null +++ b/examples/flex-flows/basic/flex-flow-quickstart.ipynb @@ -0,0 +1,334 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Getting started with flex flow\n", + "\n", + "**Learning Objectives** - Upon completing this tutorial, you should be able to:\n", + "\n", + "- Write an LLM application using a notebook and visualize the trace of your application.\n", + "- Convert the application into a flow and batch run it against multiple lines of data.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. 
Install dependent packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture --no-stderr\n", + "%pip install -r ./requirements.txt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Trace your application with promptflow\n", + "\n", + "Assume we already have a Python function that calls the OpenAI API. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(\"llm.py\") as fin:\n", + " print(fin.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: before running the cell below, please configure the required environment variables `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` by creating a `.env` file. Please refer to [.env.example](.env.example) as a template." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# control the AOAI deployment (model) used in this example\n", + "deployment_name = \"gpt-35-turbo\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from llm import my_llm_tool\n", + "\n", + "# please configure the `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` environment variables first\n", + "result = my_llm_tool(\n", + " prompt=\"Write a simple Hello, world! program that displays the greeting message when executed. Output code only.\",\n", + " deployment_name=deployment_name,\n", + ")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize trace by using start_trace\n", + "\n", + "Note we added `@trace` to the `my_llm_tool` function; re-running the cell below will collect a trace in the trace UI." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.tracing import start_trace\n", + "\n", + "# start a trace session, and print a url for user to check trace\n", + "start_trace()\n", + "# rerun the function, which will be recorded in the trace\n", + "result = my_llm_tool(\n", + " prompt=\"Write a simple Hello, world! program that displays the greeting message when executed. Output code only.\",\n", + " deployment_name=deployment_name,\n", + ")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now, let's add another layer of function call. In [programmer.py](programmer.py) there is a function called `write_simple_program`, which calls a new function called `load_prompt` and the previous `my_llm_tool` function."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# show the programmer.py content\n", + "with open(\"programmer.py\") as fin:\n", + " print(fin.read())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# call the flow entry function\n", + "from programmer import write_simple_program\n", + "\n", + "result = write_simple_program(\"Java Hello, world!\")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Eval the result " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2\n", + "\n", + "import paths # add the code_quality module to the path\n", + "from code_quality import eval_code\n", + "\n", + "eval_result = eval_code(result)\n", + "eval_result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Batch run the function as flow with multi-line data\n", + "\n", + "Create a [flow.flex.yaml](flow.flex.yaml) file to define a flow whose entry points to the Python function we defined.\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# show the flow.flex.yaml content\n", + "with open(\"flow.flex.yaml\") as fin:\n", + " print(fin.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Batch run with a data file (with multiple lines of test data)\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.client import PFClient\n", + "\n", + "pf = PFClient()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data = \"./data.jsonl\" # path to the data file\n", + "# create run with the flow function and data\n", + "base_run = pf.run(\n", + " flow=write_simple_program,\n", + " data=data,\n", + " column_mapping={\n", + " \"text\": \"${data.text}\",\n", + " },\n", + " stream=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "details = pf.get_details(base_run)\n", + "details.head(10)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Evaluate your flow\n", + "Then you can use an evaluation method to evaluate your flow. The evaluation methods are also flows, which usually use an LLM to assert that the produced output matches certain expectations. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run evaluation on the previous batch run\n", + "The **base_run** is the batch run we completed in step 2 above, for the basic flex flow with \"data.jsonl\" as input."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# we can also run a flow by pointing to its yaml file\n", + "eval_flow = \"../eval-code-quality/flow.flex.yaml\"\n", + "\n", + "eval_run = pf.run(\n", + " flow=eval_flow,\n", + " data=\"./data.jsonl\", # path to the data file\n", + " run=base_run, # specify base_run as the run you want to evaluate\n", + " column_mapping={\n", + " \"code\": \"${run.outputs.output}\",\n", + " },\n", + " stream=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "details = pf.get_details(eval_run)\n", + "details.head(10)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "\n", + "metrics = pf.get_metrics(eval_run)\n", + "print(json.dumps(metrics, indent=4))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "pf.visualize([base_run, eval_run])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Next Steps\n", + "\n", + "By now you've successfully run your first prompt flow and even evaluated it. That's great!\n", + "\n", + "You can check out more examples:\n", + "- [Basic Chat](../chat-basic/README.md): demonstrates how to create a chatbot that can remember previous interactions and use the conversation history to generate the next message." + ] + } + ], + "metadata": { + "description": "A quickstart tutorial to run a flex flow and evaluate it.", + "kernelspec": { + "display_name": "prompt_flow", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + }, + "resources": "examples/requirements.txt, examples/flex-flows/basic" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/flex-flows/basic/flow.flex.yaml b/examples/flex-flows/basic/flow.flex.yaml new file mode 100644 index 00000000000..fa95b3a516f --- /dev/null +++ b/examples/flex-flows/basic/flow.flex.yaml @@ -0,0 +1,10 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json +entry: programmer:write_simple_program +environment: + # image: mcr.microsoft.com/azureml/promptflow/promptflow-python + python_requirements_txt: requirements.txt +environment_variables: + # environment variables from connection + AZURE_OPENAI_API_KEY: ${open_ai_connection.api_key} + AZURE_OPENAI_ENDPOINT: ${open_ai_connection.api_base} + AZURE_OPENAI_API_TYPE: azure diff --git a/examples/flex-flows/basic/hello.jinja2 b/examples/flex-flows/basic/hello.jinja2 new file mode 100644 index 00000000000..738367f7cf7 --- /dev/null +++ b/examples/flex-flows/basic/hello.jinja2 @@ -0,0 +1,3 @@ +system: +Write a simple {{text}} program. +Output code only. 
\ No newline at end of file diff --git a/examples/flex-flows/basic/llm.py b/examples/flex-flows/basic/llm.py new file mode 100644 index 00000000000..af1ac68667c --- /dev/null +++ b/examples/flex-flows/basic/llm.py @@ -0,0 +1,80 @@ +import os + +from dotenv import load_dotenv +from openai.version import VERSION as OPENAI_VERSION + +from promptflow.tracing import trace + + +def get_client(): + if OPENAI_VERSION.startswith("0."): + raise Exception( + "Please upgrade your OpenAI package to version >= 1.0.0 or using the command: pip install --upgrade openai." + ) + api_key = os.environ.get("OPENAI_API_KEY", None) + if api_key: + from openai import OpenAI + + return OpenAI() + else: + from openai import AzureOpenAI + + return AzureOpenAI( + api_version=os.environ.get("OPENAI_API_VERSION", "2023-07-01-preview") + ) + + +@trace +def my_llm_tool( + prompt: str, + # for AOAI, deployment name is customized by user, not model name. + deployment_name: str, + max_tokens: int = 120, + temperature: float = 1.0, + top_p: float = 1.0, + n: int = 1, + logprobs: int = None, + stop: list = None, + presence_penalty: float = 0, + frequency_penalty: float = 0, + logit_bias: dict = {}, + user: str = "", + **kwargs, +) -> str: + if "OPENAI_API_KEY" not in os.environ and "AZURE_OPENAI_API_KEY" not in os.environ: + # load environment variables from .env file + load_dotenv() + + if "OPENAI_API_KEY" not in os.environ and "AZURE_OPENAI_API_KEY" not in os.environ: + raise Exception( + "Please specify environment variables: OPENAI_API_KEY or AZURE_OPENAI_API_KEY" + ) + messages = [{"content": prompt, "role": "system"}] + response = get_client().chat.completions.create( + # prompt=prompt, + messages=messages, + model=deployment_name, + max_tokens=int(max_tokens), + temperature=float(temperature), + top_p=float(top_p), + n=int(n), + logprobs=int(logprobs) if logprobs else None, + # fix bug "[] is not valid under any of the given schemas-'stop'" + stop=stop if stop else None, + presence_penalty=float(presence_penalty), + frequency_penalty=float(frequency_penalty), + # Logit bias must be a dict if we passed it to openai api. + logit_bias=logit_bias if logit_bias else {}, + user=user, + ) + + # get first element because prompt is single. + return response.choices[0].message.content + + +if __name__ == "__main__": + result = my_llm_tool( + prompt="Write a simple Hello, world! 
program that displays the greeting message.", + deployment_name="text-davinci-003", + ) + print(result) diff --git a/examples/flex-flows/basic/paths.py b/examples/flex-flows/basic/paths.py new file mode 100644 index 00000000000..c2ea6db9ffb --- /dev/null +++ b/examples/flex-flows/basic/paths.py @@ -0,0 +1,6 @@ +import sys +import pathlib + +# Add the path to the evaluation code quality module +code_path = str(pathlib.Path(__file__).parent / "../eval-code-quality") +sys.path.insert(0, code_path) diff --git a/examples/flex-flows/basic/programmer.py b/examples/flex-flows/basic/programmer.py new file mode 100644 index 00000000000..db8467d773c --- /dev/null +++ b/examples/flex-flows/basic/programmer.py @@ -0,0 +1,41 @@ +from pathlib import Path +from typing import TypedDict + +from jinja2 import Template +from llm import my_llm_tool + +from promptflow.tracing import trace + +BASE_DIR = Path(__file__).absolute().parent + + +class Result(TypedDict): + output: str + + +@trace +def load_prompt(jinja2_template: str, text: str) -> str: + """Load prompt function.""" + with open(BASE_DIR / jinja2_template, "r", encoding="utf-8") as f: + prompt = Template( + f.read(), trim_blocks=True, keep_trailing_newline=True + ).render(text=text) + return prompt + + +@trace +def write_simple_program( + text: str = "Hello World!", deployment_name="gpt-35-turbo" +) -> Result: + """Ask LLM to write a simple program.""" + prompt = load_prompt("hello.jinja2", text) + output = my_llm_tool(prompt=prompt, deployment_name=deployment_name, max_tokens=120) + return Result(output=output) + + +if __name__ == "__main__": + from promptflow.tracing import start_trace + + start_trace() + result = write_simple_program("Hello, world!", "gpt-35-turbo") + print(result) diff --git a/examples/flex-flows/basic/requirements-azure.txt b/examples/flex-flows/basic/requirements-azure.txt new file mode 100644 index 00000000000..f72e46bfbb6 --- /dev/null +++ b/examples/flex-flows/basic/requirements-azure.txt @@ -0,0 +1 @@ +promptflow-azure diff --git a/examples/flex-flows/basic/requirements.txt b/examples/flex-flows/basic/requirements.txt new file mode 100644 index 00000000000..006ac2f55a8 --- /dev/null +++ b/examples/flex-flows/basic/requirements.txt @@ -0,0 +1,2 @@ +promptflow-core +python-dotenv diff --git a/examples/flex-flows/basic/run.yml b/examples/flex-flows/basic/run.yml new file mode 100644 index 00000000000..1838ebd4eb0 --- /dev/null +++ b/examples/flex-flows/basic/run.yml @@ -0,0 +1,5 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json +flow: . +data: data.jsonl +column_mapping: + text: ${data.text} diff --git a/examples/flex-flows/chat-basic/README.md b/examples/flex-flows/chat-basic/README.md new file mode 100644 index 00000000000..932f6de5cb2 --- /dev/null +++ b/examples/flex-flows/chat-basic/README.md @@ -0,0 +1,117 @@ +# Basic chat +A basic chat flow defined using class entry. It demonstrates how to create a chatbot that can remember previous interactions and use the conversation history to generate next message. + +## Prerequisites + +Install promptflow sdk and other dependencies in this folder: +```bash +pip install -r requirements.txt +``` + +## What you will learn + +In this flow, you will learn +- how to compose a chat flow. +- prompt template format of LLM tool chat api. Message delimiter is a separate line containing role name and colon: "system:", "user:", "assistant:". +See OpenAI Chat for more about message role. + ```jinja + system: + You are a chatbot having a conversation with a human. 
+ + user: + {{question}} + ``` +- how to consume chat history in the prompt. + ```jinja + {% for item in chat_history %} + user: + {{item.inputs.question}} + assistant: + {{item.outputs.answer}} + {% endfor %} + ``` + +## Run flow + +- Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one. + +- Setup connection + +Go to the "Prompt flow" "Connections" tab. Click the "Create" button, select one of the connection types supported by the LLM tool, and fill in the configurations. + +Or use CLI to create connection: + +```bash +# Override keys with --set to avoid yaml file changes +pf connection create --file ../../connections/azure_openai.yml --set api_key= api_base= --name open_ai_connection +``` + +Note that in [flow.flex.yaml](flow.flex.yaml) we are using a connection named `open_ai_connection`. +```bash +# show registered connection +pf connection show --name open_ai_connection +``` + +- Run as a normal Python file + +```bash +python flow.py +``` + +- Test flow +You'll need the flow entry file `flow.flex.yaml` to test with prompt flow. + +```bash +# run chat flow with default question in flow.flex.yaml +pf flow test --flow . --init connection=open_ai_connection + +# run chat flow with new question +pf flow test --flow . --init connection=open_ai_connection --inputs question="What's Azure Machine Learning?" + +pf flow test --flow . --init connection=open_ai_connection --inputs question="What is ChatGPT? Please explain with a concise statement." +``` + +- Create run with multiple lines data + +```bash +pf run create --flow . --init connection=open_ai_connection --data ./data.jsonl --column-mapping question='${data.question}' --stream +``` + +You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI. + +- List and show run meta +```bash +# list created run +pf run list + +# get a sample run name + +name=$(pf run list -r 10 | jq '.[] | select(.name | contains("chat_basic_")) | .name'| head -n 1 | tr -d '"') +# show specific run detail +pf run show --name $name + +# show output +pf run show-details --name $name + +# visualize run in browser +pf run visualize --name $name +``` + +## Run flow in cloud + +- Assume we already have a connection named `open_ai_connection` in the workspace. + +```bash +# set default workspace +az account set -s +az configure --defaults group= workspace= +``` + +- Create run + +```bash +# run with environment variable reference connection in azureml workspace +pfazure run create --flow . --init connection=open_ai_connection --data ./data.jsonl --column-mapping question='${data.question}' --stream +# run using yaml file +pfazure run create --file run.yml --stream diff --git a/examples/flex-flows/chat-basic/chat.jinja2 b/examples/flex-flows/chat-basic/chat.jinja2 new file mode 100644 index 00000000000..c5e811e1969 --- /dev/null +++ b/examples/flex-flows/chat-basic/chat.jinja2 @@ -0,0 +1,12 @@ +system: +You are a helpful assistant.
+ +{% for item in chat_history %} +user: +{{item.inputs.question}} +assistant: +{{item.outputs.answer}} +{% endfor %} + +user: +{{question}} \ No newline at end of file diff --git a/examples/flex-flows/chat-basic/data.jsonl b/examples/flex-flows/chat-basic/data.jsonl new file mode 100644 index 00000000000..34b2fb42025 --- /dev/null +++ b/examples/flex-flows/chat-basic/data.jsonl @@ -0,0 +1,2 @@ +{"question": "What is Prompt flow?", "statements": {"correctness": "should explain what's 'Prompt flow'"}} +{"question": "What is ChatGPT? Please explain with consise statement", "statements": { "correctness": "should explain what's ChatGPT", "consise": "It is a consise statement."}} \ No newline at end of file diff --git a/examples/flex-flows/chat-basic/flow.flex.yaml b/examples/flex-flows/chat-basic/flow.flex.yaml new file mode 100644 index 00000000000..ea0410185ec --- /dev/null +++ b/examples/flex-flows/chat-basic/flow.flex.yaml @@ -0,0 +1,5 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json +entry: flow:ChatFlow +environment: + # image: mcr.microsoft.com/azureml/promptflow/promptflow-python + python_requirements_txt: requirements.txt diff --git a/examples/flex-flows/chat-basic/flow.py b/examples/flex-flows/chat-basic/flow.py new file mode 100644 index 00000000000..dafafe80841 --- /dev/null +++ b/examples/flex-flows/chat-basic/flow.py @@ -0,0 +1,59 @@ +from dataclasses import dataclass +from pathlib import Path + +from jinja2 import Template + +from promptflow.tracing import trace +from promptflow.connections import AzureOpenAIConnection +from promptflow.tools.aoai import chat + +BASE_DIR = Path(__file__).absolute().parent + + +@trace +def load_prompt(jinja2_template: str, question: str, chat_history: list) -> str: + """Load prompt function.""" + with open(BASE_DIR / jinja2_template, "r", encoding="utf-8") as f: + tmpl = Template(f.read(), trim_blocks=True, keep_trailing_newline=True) + prompt = tmpl.render(question=question, chat_history=chat_history) + return prompt + + +@dataclass +class Result: + answer: str + + +class ChatFlow: + def __init__(self, connection: AzureOpenAIConnection): + self.connection = connection + + def __call__( + self, question: str = "What is ChatGPT?", chat_history: list = None + ) -> Result: + """Flow entry function.""" + + chat_history = chat_history or [] + + prompt = load_prompt("chat.jinja2", question, chat_history) + + output = chat( + connection=self.connection, + prompt=prompt, + deployment_name="gpt-35-turbo", + max_tokens=256, + temperature=0.7, + ) + return Result(answer=output) + + +if __name__ == "__main__": + from promptflow.tracing import start_trace + from promptflow.client import PFClient + + start_trace() + pf = PFClient() + connection = pf.connections.get("open_ai_connection", with_secrets=True) + flow = ChatFlow(connection=connection) + result = flow("What's Azure Machine Learning?", []) + print(result) diff --git a/examples/flex-flows/chat-basic/requirements.txt b/examples/flex-flows/chat-basic/requirements.txt new file mode 100644 index 00000000000..55a002e12f8 --- /dev/null +++ b/examples/flex-flows/chat-basic/requirements.txt @@ -0,0 +1,2 @@ +promptflow-core +promptflow-tools \ No newline at end of file diff --git a/examples/flex-flows/chat-basic/run.yml b/examples/flex-flows/chat-basic/run.yml new file mode 100644 index 00000000000..4e419997bed --- /dev/null +++ b/examples/flex-flows/chat-basic/run.yml @@ -0,0 +1,7 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json +flow: . 
+data: data.jsonl +init: + connection: open_ai_connection +column_mapping: + question: ${data.question} diff --git a/examples/flex-flows/chat-basic/sample.json b/examples/flex-flows/chat-basic/sample.json new file mode 100644 index 00000000000..b1af8226725 --- /dev/null +++ b/examples/flex-flows/chat-basic/sample.json @@ -0,0 +1 @@ +{"question": "What is Prompt flow?"} \ No newline at end of file diff --git a/examples/flex-flows/eval-checklist/README.md b/examples/flex-flows/eval-checklist/README.md new file mode 100644 index 00000000000..e06ae496bd9 --- /dev/null +++ b/examples/flex-flows/eval-checklist/README.md @@ -0,0 +1,89 @@ +# Eval Check List +An example flow defined using a class entry, which demonstrates how to evaluate whether an answer passes a user-specified check list. + +## Prerequisites + +Install promptflow sdk and other dependencies: +```bash +pip install -r requirements.txt +``` + +## Run flow + +- Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one. + +- Setup connection + +Go to the "Prompt flow" "Connections" tab. Click the "Create" button, select one of the connection types supported by the LLM tool, and fill in the configurations. + +Or use CLI to create connection: + +```bash +# Override keys with --set to avoid yaml file changes +pf connection create --file ../../connections/azure_openai.yml --set api_key= api_base= --name open_ai_connection +``` + +Note that in [flow.flex.yaml](flow.flex.yaml) we are using a connection named `open_ai_connection`. + +```bash +# show registered connection +pf connection show --name open_ai_connection +``` + +- Run as a normal Python file + +```bash +python check_list.py +``` + +- Test flow +You'll need the flow entry file `flow.flex.yaml` to test with prompt flow. + +```bash +pf flow test --flow . --init connection=open_ai_connection --inputs sample.json +``` + +- Create run with multiple lines data + +```bash +pf run create --flow . --init connection=open_ai_connection --data ./data.jsonl --stream +``` + +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI. + +- List and show run meta + +```bash +# list created run +pf run list + +# get a sample run name + +name=$(pf run list -r 10 | jq '.[] | select(.name | contains("eval_checklist_")) | .name'| head -n 1 | tr -d '"') +# show specific run detail +pf run show --name $name + +# show output +pf run show-details --name $name + +# visualize run in browser +pf run visualize --name $name +``` + +## Run flow in cloud + +- Assume we already have a connection named `open_ai_connection` in the workspace. + +```bash +# set default workspace +az account set -s +az configure --defaults group= workspace= +``` + +- Create run + +```bash +# run with environment variable reference connection in azureml workspace +pfazure run create --flow .
--init connection=open_ai_connection --data ./data.jsonl --stream +# run using yaml file +pfazure run create --file run.yml --stream diff --git a/examples/flex-flows/eval-checklist/check_list.py b/examples/flex-flows/eval-checklist/check_list.py new file mode 100644 index 00000000000..b2cbcee702f --- /dev/null +++ b/examples/flex-flows/eval-checklist/check_list.py @@ -0,0 +1,91 @@ +import json +from pathlib import Path + +from jinja2 import Template + +from promptflow.tracing import trace +from promptflow.connections import AzureOpenAIConnection +from promptflow.tools.aoai import chat + +BASE_DIR = Path(__file__).absolute().parent + + +@trace +def load_prompt( + jinja2_template: str, answer: str, statement: str, examples: list +) -> str: + """Load prompt function.""" + with open(BASE_DIR / jinja2_template, "r", encoding="utf-8") as f: + tmpl = Template(f.read(), trim_blocks=True, keep_trailing_newline=True) + prompt = tmpl.render(answer=answer, statement=statement, examples=examples) + return prompt + + +@trace +def check(answer: str, statement: str, connection: AzureOpenAIConnection): + """Check the answer applies for the check statement.""" + examples = [ + { + "answer": "ChatGPT is a conversational AI model developed by OpenAI.", + "statement": "It contains a brief explanation of ChatGPT.", + "score": 5, + "explanation": "The statement is correct. The answer contains a brief explanation of ChatGPT.", + } + ] + + prompt = load_prompt("prompt.md", answer, statement, examples) + + output = chat( + connection=connection, + prompt=prompt, + deployment_name="gpt-35-turbo", + max_tokens=256, + temperature=0.7, + ) + output = json.loads(output) + return output + + +class EvalFlow: + def __init__(self, connection: AzureOpenAIConnection): + self.connection = connection + + def __call__(self, answer: str, statements: dict): + """Check the answer applies for a collection of check statement.""" + if isinstance(statements, str): + statements = json.loads(statements) + + results = {} + for key, statement in statements.items(): + r = check(answer=answer, statement=statement, connection=self.connection) + results[key] = r + return results + + +if __name__ == "__main__": + from promptflow.tracing import start_trace + from promptflow.client import PFClient + + start_trace() + + answer = """ChatGPT is a conversational AI model developed by OpenAI. + It is based on the GPT-3 architecture and is designed to generate human-like responses to text inputs. + ChatGPT is capable of understanding and responding to a wide range of topics and can be used for tasks such as + answering questions, generating creative content, and providing assistance with various tasks. + The model has been trained on a diverse range of internet text and is constantly being updated to improve its + performance and capabilities. 
ChatGPT is available through the OpenAI API and can be accessed by developers and + researchers to build applications and tools that leverage its capabilities.""" + statements = { + "correctness": "It contains a detailed explanation of ChatGPT.", + "concise": "It is a concise statement.", + } + + pf = PFClient() + connection = pf.connections.get("open_ai_connection", with_secrets=True) + flow = EvalFlow(connection=connection) + + result = flow( + answer=answer, + statements=statements, + ) + print(result) diff --git a/examples/flex-flows/eval-checklist/data.jsonl b/examples/flex-flows/eval-checklist/data.jsonl new file mode 100644 index 00000000000..9dbbec4fa07 --- /dev/null +++ b/examples/flex-flows/eval-checklist/data.jsonl @@ -0,0 +1 @@ +{"answer": "ChatGPT is a conversational AI model developed by OpenAI. It is based on the GPT-3 architecture and is designed to generate human-like responses to text inputs. ChatGPT is capable of understanding and responding to a wide range of topics and can be used for tasks such as answering questions, generating creative content, and providing assistance with various tasks. The model has been trained on a diverse range of internet text and is constantly being updated to improve its performance and capabilities. ChatGPT is available through the OpenAI API and can be accessed by developers and researchers to build applications and tools that leverage its capabilities.", "statements": { "correctness": "It contains a detailed explanation of ChatGPT." }} \ No newline at end of file diff --git a/examples/flex-flows/eval-checklist/flow.flex.yaml b/examples/flex-flows/eval-checklist/flow.flex.yaml new file mode 100644 index 00000000000..0a7e143fb74 --- /dev/null +++ b/examples/flex-flows/eval-checklist/flow.flex.yaml @@ -0,0 +1,6 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json +# flow is defined as python class +entry: check_list:EvalFlow +environment: + # image: mcr.microsoft.com/azureml/promptflow/promptflow-python + python_requirements_txt: requirements.txt diff --git a/examples/flex-flows/eval-checklist/prompt.md b/examples/flex-flows/eval-checklist/prompt.md new file mode 100644 index 00000000000..af7cfd84aed --- /dev/null +++ b/examples/flex-flows/eval-checklist/prompt.md @@ -0,0 +1,21 @@ + +# system: +You are an AI assistant. +Your task is to evaluate a score based on how well the statement applies to the answer. + + +# user: +This score value should always be an integer between 1 and 5. So the score produced should be 1 or 2 or 3 or 4 or 5.
+ +Here are a few examples: +{% for ex in examples %} +answer: {{ex.answer}} +statement: {{ex.statement}} +OUTPUT: +{"score": "{{ex.score}}", "explanation":"{{ex.explanation}}"} +{% endfor %} + +For a given answer, evaluate the answer based on how well the statement applies to the answer: +answer: {{answer}} +statement: {{statement}} +OUTPUT: \ No newline at end of file diff --git a/examples/flex-flows/eval-checklist/requirements.txt b/examples/flex-flows/eval-checklist/requirements.txt new file mode 100644 index 00000000000..55a002e12f8 --- /dev/null +++ b/examples/flex-flows/eval-checklist/requirements.txt @@ -0,0 +1,2 @@ +promptflow-core +promptflow-tools \ No newline at end of file diff --git a/examples/flex-flows/eval-checklist/run.yml b/examples/flex-flows/eval-checklist/run.yml new file mode 100644 index 00000000000..f208b58bfc2 --- /dev/null +++ b/examples/flex-flows/eval-checklist/run.yml @@ -0,0 +1,6 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json +flow: . +data: data.jsonl +init: + connection: open_ai_connection + diff --git a/examples/flex-flows/eval-checklist/sample.json b/examples/flex-flows/eval-checklist/sample.json new file mode 100644 index 00000000000..5a62b706a4e --- /dev/null +++ b/examples/flex-flows/eval-checklist/sample.json @@ -0,0 +1,4 @@ +{ + "answer": "ChatGPT is a conversational AI model developed by OpenAI. It is based on the GPT-3 architecture and is designed to generate human-like responses to text inputs. ChatGPT is capable of understanding and responding to a wide range of topics and can be used for tasks such as answering questions, generating creative content, and providing assistance with various tasks. The model has been trained on a diverse range of internet text and is constantly being updated to improve its performance and capabilities. ChatGPT is available through the OpenAI API and can be accessed by developers and researchers to build applications and tools that leverage its capabilities.", + "statements": { "correctness": "It contains a detailed explanation of ChatGPT." } +} \ No newline at end of file diff --git a/examples/flex-flows/eval-code-quality/README.md b/examples/flex-flows/eval-code-quality/README.md new file mode 100644 index 00000000000..312d7da87e6 --- /dev/null +++ b/examples/flex-flows/eval-code-quality/README.md @@ -0,0 +1,35 @@ +# Eval Code Quality +An example flow defined using a function entry, which shows how to evaluate the quality of a code snippet. + +## Prerequisites + +Install promptflow sdk and other dependencies: +```bash +pip install -r requirements.txt +``` + +## Run flow + +- Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one. + +- Setup environment variables + +Ensure you have put your Azure OpenAI endpoint and key in the [.env](../.env) file. You can create one by referring to this [example file](../.env.example). + +```bash +cat ../.env +``` + +- Run as a normal Python file +```bash +python code_quality.py +``` + +- Test flow +```bash +# correct +pf flow test --flow . --inputs code='print(\"Hello, world!\")' + +# incorrect +pf flow test --flow .
--inputs code='print("Hello, world!")' +``` \ No newline at end of file diff --git a/examples/flex-flows/eval-code-quality/code_quality.py b/examples/flex-flows/eval-code-quality/code_quality.py new file mode 100644 index 00000000000..fa997c40c10 --- /dev/null +++ b/examples/flex-flows/eval-code-quality/code_quality.py @@ -0,0 +1,72 @@ +import json +import os +from dataclasses import dataclass +from pathlib import Path + +from dotenv import load_dotenv +from jinja2 import Template + +from promptflow.tracing import trace +from promptflow.connections import AzureOpenAIConnection +from promptflow.tools.aoai import AzureOpenAI + +BASE_DIR = Path(__file__).absolute().parent + + +@trace +def load_prompt(jinja2_template: str, code: str, examples: list) -> str: + """Load prompt function.""" + with open(BASE_DIR / jinja2_template, "r", encoding="utf-8") as f: + tmpl = Template(f.read(), trim_blocks=True, keep_trailing_newline=True) + prompt = tmpl.render(code=code, examples=examples) + return prompt + + +@dataclass +class Result: + correctness: float + readability: float + explanation: str + + +@trace +def eval_code(code: str) -> Result: + """Evaluate the code based on correctness, readability.""" + examples = [ + { + "code": 'print("Hello, world!")', + "correctness": 5, + "readability": 5, + "explanation": "The code is correct as it is a simple question and answer format. " + "The readability is also good as the code is short and easy to understand.", + } + ] + + prompt = load_prompt("prompt.md", code, examples) + + if "AZURE_OPENAI_API_KEY" not in os.environ: + # load environment variables from .env file + load_dotenv() + + if "AZURE_OPENAI_API_KEY" not in os.environ: + raise Exception("Please specify environment variables: AZURE_OPENAI_API_KEY") + + connection = AzureOpenAIConnection.from_env() + + output = AzureOpenAI(connection).chat( + prompt=prompt, + deployment_name="gpt-35-turbo", + max_tokens=256, + temperature=0.7, + ) + output = Result(**json.loads(output)) + return output + + +if __name__ == "__main__": + from promptflow.tracing import start_trace + + start_trace() + + result = eval_code('print("Hello, world!")') + print(result) diff --git a/examples/flex-flows/eval-code-quality/flow.flex.yaml b/examples/flex-flows/eval-code-quality/flow.flex.yaml new file mode 100644 index 00000000000..399c837ce79 --- /dev/null +++ b/examples/flex-flows/eval-code-quality/flow.flex.yaml @@ -0,0 +1,6 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json +# flow is defined as python function +entry: code_quality:eval_code +environment: + # image: mcr.microsoft.com/azureml/promptflow/promptflow-python + python_requirements_txt: requirements.txt diff --git a/examples/flex-flows/eval-code-quality/prompt.md b/examples/flex-flows/eval-code-quality/prompt.md new file mode 100644 index 00000000000..a1bd195b488 --- /dev/null +++ b/examples/flex-flows/eval-code-quality/prompt.md @@ -0,0 +1,20 @@ + +# system: +You are an AI assistant. +You task is to evaluate the code based on correctness, readability. + + +# user: +This correctness value should always be an integer between 1 and 5. So the correctness produced should be 1 or 2 or 3 or 4 or 5. +This readability value should always be an integer between 1 and 5. So the readability produced should be 1 or 2 or 3 or 4 or 5. 
+ +Here are a few examples: +{% for ex in examples %} +Code: {{ex.code}} +OUTPUT: +{"correctness": "{{ex.correctness}}", "readability": "{{ex.readability}}", "explanation":"{{ex.explanation}}"} +{% endfor %} + +For a given code, evaluate the code based on correctness, readability: +Code: {{code}} +OUTPUT: \ No newline at end of file diff --git a/examples/flex-flows/eval-code-quality/requirements.txt b/examples/flex-flows/eval-code-quality/requirements.txt new file mode 100644 index 00000000000..55a002e12f8 --- /dev/null +++ b/examples/flex-flows/eval-code-quality/requirements.txt @@ -0,0 +1,2 @@ +promptflow-core +promptflow-tools \ No newline at end of file diff --git a/examples/flows/standard/basic/hello.py b/examples/flows/standard/basic/hello.py index 74d95785d0a..dbb5e7f5eb9 100644 --- a/examples/flows/standard/basic/hello.py +++ b/examples/flows/standard/basic/hello.py @@ -27,7 +27,7 @@ def get_client(): else: from openai import AzureOpenAI as Client conn.update( - azure_endpoint=os.environ["AZURE_OPENAI_API_BASE"], + azure_endpoint=os.environ.get("AZURE_OPENAI_API_BASE", "azure"), api_version=os.environ.get("OPENAI_API_VERSION", "2023-07-01-preview"), ) return Client(**conn) @@ -53,7 +53,7 @@ def my_python_tool( user: str = "", **kwargs, ) -> str: - if "AZURE_OPENAI_API_KEY" not in os.environ: + if "AZURE_OPENAI_API_KEY" not in os.environ or "AZURE_OPENAI_API_BASE" not in os.environ: # load environment variables from .env file load_dotenv() diff --git a/examples/flows/standard/customer-intent-extraction/README.md b/examples/flows/standard/customer-intent-extraction/README.md index 933a9030d0f..99c5589972d 100644 --- a/examples/flows/standard/customer-intent-extraction/README.md +++ b/examples/flows/standard/customer-intent-extraction/README.md @@ -37,7 +37,7 @@ pf connection create -f .env --name custom_connection 3. test flow with single line input ```bash -pf flow test --flow . --input ./data/denormalized-flat.jsonl +pf flow test --flow . --inputs ./data/sample.json ``` 4.
run with multiple lines input diff --git a/examples/flows/standard/customer-intent-extraction/data/sample.json b/examples/flows/standard/customer-intent-extraction/data/sample.json new file mode 100644 index 00000000000..e96d6fffdf1 --- /dev/null +++ b/examples/flows/standard/customer-intent-extraction/data/sample.json @@ -0,0 +1,13 @@ +{ + "customer_info": "## Customer_Info\n\nFirst Name: Sarah \nLast Name: Lee \nAge: 38 \nEmail Address: sarahlee@example.com \nPhone Number: 555-867-5309 \nShipping Address: 321 Maple St, Bigtown USA, 90123 \nMembership: Platinum \n\n## Recent_Purchases\n\norder_number: 2 \ndate: 2023-02-10 \nitem:\n- description: TrailMaster X4 Tent, quantity 1, price $250 \n\u00a0 item_number: 1 \n\norder_number: 26 \ndate: 2023-02-05 \nitem:\n- description: CozyNights Sleeping Bag, quantity 1, price $100 \n\u00a0 item_number: 7 \n\norder_number: 35 \ndate: 2023-02-20 \nitem:\n- description: TrailBlaze Hiking Pants, quantity 1, price $75 \n\u00a0 item_number: 10 \n\norder_number: 42 \ndate: 2023-04-06 \nitem:\n- description: TrekMaster Camping Chair, quantity 2, price $100 \n\u00a0 item_number: 12 \n\norder_number: 51 \ndate: 2023-04-21 \nitem:\n- description: SkyView 2-Person Tent, quantity 1, price $200 \n\u00a0 item_number: 15 \n\norder_number: 56 \ndate: 2023-03-26 \nitem:\n- description: RainGuard Hiking Jacket, quantity 1, price $110 \n\u00a0 item_number: 17 \n\norder_number: 65 \ndate: 2023-04-11 \nitem:\n- description: CompactCook Camping Stove, quantity 1, price $60 \n\u00a0 item_number: 20 \n\n", + "history": [ + { + "role": "customer", + "content": "I recently bought the TrailMaster X4 Tent, and it leaked during a light rain. This is unacceptable! I expected a reliable and waterproof tent, but it failed to deliver. I'm extremely disappointed in the quality." 
+ } + ], + "item_number": 1, + "order_number": 2, + "description": "TrailMaster X4 Tent, quantity 1, price $250", + "intent": "product return" +} diff --git a/examples/flows/standard/customer-intent-extraction/intent.py b/examples/flows/standard/customer-intent-extraction/intent.py index 51c95aeeafe..53141949dbc 100644 --- a/examples/flows/standard/customer-intent-extraction/intent.py +++ b/examples/flows/standard/customer-intent-extraction/intent.py @@ -7,7 +7,10 @@ def extract_intent(chat_prompt: str): - if "AZURE_OPENAI_API_KEY" not in os.environ: + if ( + "AZURE_OPENAI_API_KEY" not in os.environ + or "AZURE_OPENAI_API_BASE" not in os.environ + ): # load environment variables from .env file try: from dotenv import load_dotenv @@ -18,8 +21,11 @@ def extract_intent(chat_prompt: str): load_dotenv() + # AZURE_OPENAI_ENDPOINT conflict with AZURE_OPENAI_API_BASE when use with langchain + if "AZURE_OPENAI_ENDPOINT" in os.environ: + os.environ.pop("AZURE_OPENAI_ENDPOINT") chat = AzureChatOpenAI( - deployment_name=os.environ["CHAT_DEPLOYMENT_NAME"], + deployment_name=os.environ.get("CHAT_DEPLOYMENT_NAME", "gpt-35-turbo"), openai_api_key=os.environ["AZURE_OPENAI_API_KEY"], openai_api_base=os.environ["AZURE_OPENAI_API_BASE"], openai_api_type="azure", @@ -38,11 +44,11 @@ def generate_prompt(customer_info: str, history: list, user_prompt_template: str prompt_template = PromptTemplate.from_template(user_prompt_template) chat_prompt_template = ChatPromptTemplate.from_messages( - [ - HumanMessagePromptTemplate(prompt=prompt_template) - ] + [HumanMessagePromptTemplate(prompt=prompt_template)] ) - return chat_prompt_template.format_prompt(customer_info=customer_info, chat_history=chat_history_text).to_string() + return chat_prompt_template.format_prompt( + customer_info=customer_info, chat_history=chat_history_text + ).to_string() if __name__ == "__main__": @@ -60,7 +66,9 @@ def generate_prompt(customer_info: str, history: list, user_prompt_template: str # each test for item in data: - chat_prompt = generate_prompt(item["customer_info"], item["history"], user_prompt_template) + chat_prompt = generate_prompt( + item["customer_info"], item["history"], user_prompt_template + ) reply = extract_intent(chat_prompt) print("=====================================") # print("Customer info: ", item["customer_info"]) diff --git a/examples/flows/standard/customer-intent-extraction/requirements.txt b/examples/flows/standard/customer-intent-extraction/requirements.txt index 87e94a1fc7a..d4a422e5a47 100644 --- a/examples/flows/standard/customer-intent-extraction/requirements.txt +++ b/examples/flows/standard/customer-intent-extraction/requirements.txt @@ -1,5 +1,5 @@ promptflow promptflow-tools python-dotenv -langchain +langchain<0.2.0 jinja2 \ No newline at end of file diff --git a/examples/prompty/.env.example b/examples/prompty/.env.example new file mode 100644 index 00000000000..4083fa3c5ad --- /dev/null +++ b/examples/prompty/.env.example @@ -0,0 +1,2 @@ +AZURE_OPENAI_API_KEY= +AZURE_OPENAI_ENDPOINT= diff --git a/examples/prompty/README.md b/examples/prompty/README.md new file mode 100644 index 00000000000..35fd3cc4580 --- /dev/null +++ b/examples/prompty/README.md @@ -0,0 +1,19 @@ +# Prompty + +You can learn more on Prompty with examples in this folder. 
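+
+As a quick taste, a prompty can be loaded and called like a regular Python function. Below is a minimal sketch (assuming `promptflow-core` is installed, the Azure OpenAI environment variables are set, and you run it from this folder):
+
+```python
+from promptflow.core import Flow
+
+# load the prompty asset as a callable flow
+f = Flow.load("basic/basic.prompty")
+# call it like a function, passing the inputs declared in its front matter
+result = f(first_name="John", last_name="Doe", question="What is prompt flow?")
+print(result)
+```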
+ +## SDK examples + +| path | status | description | +------|--------|------------- +| [prompty-quickstart.ipynb](./basic/prompty-quickstart.ipynb) | [![samples_prompty_basic_promptyquickstart](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic_promptyquickstart.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic_promptyquickstart.yml) | A quickstart tutorial to run a prompty and evaluate it. | +| [chat-with-prompty.ipynb](./chat-basic/chat-with-prompty.ipynb) | [![samples_prompty_chatbasic_chatwithprompty](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chatbasic_chatwithprompty.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chatbasic_chatwithprompty.yml) | A quickstart tutorial to run a chat prompty and evaluate it. | + +## CLI examples +| path | status | description | +------|--------|------------- +| [basic](./basic/README.md) | [![samples_prompty_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_basic.yml) | A basic prompt that uses the chat API to answer questions, with connection configured using environment variables | +| [chat-basic](./chat-basic/README.md) | [![samples_prompty_chat_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chat_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_chat_basic.yml) | A prompt that uses the chat API to answer questions with chat history, leveraging a promptflow connection | +| [eval-apology](./eval-apology/README.md) | [![samples_prompty_eval_apology](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_apology.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_apology.yml) | A prompt that determines whether a chat conversation contains an apology from the assistant | +| [eval-basic](./eval-basic/README.md) | [![samples_prompty_eval_basic](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_basic.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_prompty_eval_basic.yml) | A prompt that determines whether an answer is correct | + diff --git a/examples/prompty/basic/README.md b/examples/prompty/basic/README.md new file mode 100644 index 00000000000..c1316c4f53e --- /dev/null +++ b/examples/prompty/basic/README.md @@ -0,0 +1,96 @@ +# Basic prompty +A basic prompt that uses the chat API to answer questions, with connection configured using environment variables. + +## Prerequisites + +Install `promptflow-devkit`: +```bash +pip install promptflow-devkit +``` + +## Run prompty + +- Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one. +- Note: you need the new [gpt-35-turbo (0125) version](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the json_object response_format feature. +- Setup environment variables + +Ensure you have put your Azure OpenAI endpoint and key in the [.env](../.env) file. You can create one by referring to this [example file](../.env.example).
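+
+A populated `.env` would contain two lines like the following (placeholder values, not real credentials):
+
+```bash
+AZURE_OPENAI_API_KEY=<your-api-key>
+AZURE_OPENAI_ENDPOINT=https://<your-resource-name>.openai.azure.com/
+```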
+ +```bash +cat ../.env +# export .env as environment variable +export $(grep -v '^#' ../.env | xargs) +``` + +- Test prompty +```bash +# test with default sample data (TODO) +# pf flow test --flow basic.prompty + +# test with flow inputs +pf flow test --flow basic.prompty --inputs first_name="John" last_name="Doe" question="What is the meaning of life?" + +# test with another sample data +pf flow test --flow basic.prompty --inputs sample.json +``` + +- Create run with multiple lines data +```bash +# using environment from .env file +pf run create --flow basic.prompty --data ./data.jsonl --column-mapping question='${data.question}' --stream +``` + +You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI. + +- List and show run meta +```bash +# list created run +pf run list + +# get a sample run name + +name=$(pf run list -r 10 | jq '.[] | select(.name | contains("basic_")) | .name'| head -n 1 | tr -d '"') +# show specific run detail +pf run show --name $name + +# show output +pf run show-details --name $name + +# visualize run in browser (TODO) +# pf run visualize --name $name +``` + +## Run prompty with connection + +Storing connection info in `.env` as plaintext is not safe. We recommend using `pf connection` to guard secrets like `api_key` from leaking. + +- Show or create `open_ai_connection` +```bash +# create connection from `azure_openai.yml` file +# Override keys with --set to avoid yaml file changes +pf connection create --file ../../connections/azure_openai.yml --set api_key= api_base= + +# check if connection exists +pf connection show -n open_ai_connection
``` + +- Test using connection secret specified in environment variables +**Note**: we used `'` to wrap the value since it supports raw values without escaping in PowerShell & bash. For Windows command prompt, you may remove the `'` to avoid it becoming part of the value.
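+
+For example, on Windows command prompt the test command shown in the next block would be written without the `'` quotes:
+
+```bash
+pf flow test --flow basic.prompty --inputs sample.json --environment-variables AZURE_OPENAI_API_KEY=${open_ai_connection.api_key} AZURE_OPENAI_ENDPOINT=${open_ai_connection.api_base}
+```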
+ +```bash +# test with sample input value in sample.json +pf flow test --flow basic.prompty --inputs sample.json --environment-variables AZURE_OPENAI_API_KEY='${open_ai_connection.api_key}' AZURE_OPENAI_ENDPOINT='${open_ai_connection.api_base}' +``` + +- Create run using connection secret binding specified in environment variables, see [run.yml](run.yml) +```bash +# create run +pf run create --flow basic.prompty --data ./data.jsonl --stream --environment-variables AZURE_OPENAI_API_KEY='${open_ai_connection.api_key}' AZURE_OPENAI_ENDPOINT='${open_ai_connection.api_base}' --column-mapping question='${data.question}' +# create run using yaml file
+pf run create --file run.yml --stream + +# show outputs +name=$(pf run list -r 10 | jq '.[] | select(.name | contains("basic_")) | .name'| head -n 1 | tr -d '"') +pf run show-details --name $name +``` diff --git a/examples/prompty/basic/basic.prompty b/examples/prompty/basic/basic.prompty new file mode 100644 index 00000000000..f30e21f11b3 --- /dev/null +++ b/examples/prompty/basic/basic.prompty @@ -0,0 +1,34 @@ +--- +name: Basic Prompt +description: A basic prompt that uses the chat API to answer questions +model: + api: chat + configuration: + type: azure_openai + azure_deployment: gpt-35-turbo-0125 + api_key: ${env:AZURE_OPENAI_API_KEY} + azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT} + parameters: + max_tokens: 128 + temperature: 0.2 + response_format: + type: json_object +inputs: + first_name: + type: string + last_name: + type: string + question: + type: string +sample: sample.json +--- +system: +You are an AI assistant who helps people find information. +As the assistant, you answer questions briefly, succinctly. Your structured response must use JSON format only, like below: +{"name": customer_name, "answer": the answer content} + +You are helping {{first_name}} {{last_name}} to find answers to their questions. +Use their name to address them in your responses. + +user: +{{question}} \ No newline at end of file diff --git a/examples/prompty/basic/data.jsonl b/examples/prompty/basic/data.jsonl new file mode 100644 index 00000000000..963441d5c87 --- /dev/null +++ b/examples/prompty/basic/data.jsonl @@ -0,0 +1,3 @@ +{"first_name": "John", "last_name": "Doe", "question": "What is the capital of France?", "ground_truth": "Paris"} +{"first_name": "John", "last_name": "Doe", "question": "What is the meaning of life?", "ground_truth": "The meaning of life is subjective and can vary greatly depending on one's personal beliefs. Some people may find meaning through personal growth, love, or contribution to others, while others may find it through religious or spiritual beliefs.
Ultimately, the meaning of life is a deeply personal and subjective concept."} +{"first_name": "John", "last_name": "Doe", "question": "What are the planets in the Solar System?", "ground_truth":"The planets in the Solar System are Mercury, Venus, Earth, Mars, Jupiter, Saturn, Uranus, and Neptune."} \ No newline at end of file diff --git a/examples/prompty/basic/prompty-quickstart.ipynb b/examples/prompty/basic/prompty-quickstart.ipynb new file mode 100644 index 00000000000..df3e163369c --- /dev/null +++ b/examples/prompty/basic/prompty-quickstart.ipynb @@ -0,0 +1,299 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Getting started with prompty\n", + "\n", + "**Learning Objectives** - Upon completing this tutorial, you should be able to:\n", + "\n", + "- Write an LLM application using prompty and visualize the trace of your application.\n", + "- Batch run a prompty against multiple lines of data.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. Install dependent packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture --no-stderr\n", + "%pip install promptflow-core" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Execute a Prompty\n", + "\n", + "Prompty is a file with the .prompty extension for developing prompt templates. \n", + "The prompty asset is a markdown file with a modified front matter. \n", + "The front matter is in yaml format and contains a number of metadata fields which define the model configuration and the expected inputs of the prompty." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(\"basic.prompty\") as fin:\n", + " print(fin.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Note: before running the cell below, please configure the required environment variables `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` by creating a `.env` file. Please refer to [.env.example](../.env.example) as a template.\n", + "\n", + "Note: you need the new [gpt-35-turbo (0125) version](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the json_object response_format feature." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "from dotenv import load_dotenv\n", + "\n", + "if \"AZURE_OPENAI_API_KEY\" not in os.environ:\n", + " # load environment variables from .env file\n", + " load_dotenv()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.core import Flow\n", + "\n", + "# load prompty as a flow\n", + "f = Flow.load(\"basic.prompty\")\n", + "# execute the flow as function\n", + "result = f(first_name=\"John\", last_name=\"Doe\", question=\"What is the capital of France?\")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize trace by using start_trace" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.tracing import start_trace\n", + "\n", + "# start a trace session, and print a url for user to check trace\n", + "start_trace()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Re-running the cell below will collect a trace in the trace UI."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# rerun the function, which will be recorded in the trace\n", + "question = \"What is the capital of Japan?\"\n", + "ground_truth = \"Tokyo\"\n", + "result = f(first_name=\"John\", last_name=\"Doe\", question=question)\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Eval the result " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# load prompty as a flow\n", + "eval_flow = Flow.load(\"../eval-basic/eval.prompty\")\n", + "# execute the flow as function\n", + "result = eval_flow(\n", + " question=question, ground_truth=ground_truth, answer=result[\"answer\"]\n", + ")\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Batch run with multi-line data\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture --no-stderr\n", + "# batch run requires promptflow-devkit package\n", + "%pip install promptflow-devkit" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.client import PFClient\n", + "\n", + "pf = PFClient()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "flow = \"./basic.prompty\" # path to the prompty file\n", + "data = \"./data.jsonl\" # path to the data file\n", + "\n", + "# create run with the flow and data\n", + "base_run = pf.run(\n", + " flow=flow,\n", + " data=data,\n", + " stream=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "details = pf.get_details(base_run)\n", + "details.head(10)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Evaluate your flow\n", + "Then you can use an evaluation method to evaluate your flow. The evaluation methods are also flows, which usually use an LLM to assert that the produced output matches certain expectations. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run evaluation on the previous batch run\n", + "The **base_run** is the batch run we completed in step 2 above, for the basic prompty with \"data.jsonl\" as input." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "eval_prompty = \"../eval-basic/eval.prompty\"\n", + "\n", + "eval_run = pf.run(\n", + " flow=eval_prompty,\n", + " data=\"./data.jsonl\", # path to the data file\n", + " run=base_run, # specify base_run as the run you want to evaluate\n", + " column_mapping={\n", + " \"answer\": \"${run.outputs.answer}\",\n", + " },\n", + " stream=True,\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "details = pf.get_details(eval_run)\n", + "details.head(10)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# TODO: fix the visualization\n", + "# pf.visualize([base_run, eval_run])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Next Steps\n", + "\n", + "By now you've successfully run your first prompt flow and even evaluated it. 
That's great!\n", + "\n", + "You can check out more examples:\n", + "- [Basic Chat](../chat-basic/README.md): demonstrates how to create a chatbot that can remember previous interactions and use the conversation history to generate the next message." + ] + } + ], + "metadata": { + "description": "A quickstart tutorial to run a prompty and evaluate it.", + "kernelspec": { + "display_name": "prompt_flow", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.18" + }, + "resources": "examples/requirements.txt, examples/prompty/basic, examples/prompty/eval-basic" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/prompty/basic/run.yml b/examples/prompty/basic/run.yml new file mode 100644 index 00000000000..f0214327ef2 --- /dev/null +++ b/examples/prompty/basic/run.yml @@ -0,0 +1,10 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json +flow: basic.prompty +data: data.jsonl +environment_variables: + # environment variables from connection + AZURE_OPENAI_API_KEY: ${open_ai_connection.api_key} + AZURE_OPENAI_ENDPOINT: ${open_ai_connection.api_base} + AZURE_OPENAI_API_TYPE: azure +column_mapping: + question: ${data.question} diff --git a/examples/prompty/basic/sample.json b/examples/prompty/basic/sample.json new file mode 100644 index 00000000000..9a6aa9cd9f5 --- /dev/null +++ b/examples/prompty/basic/sample.json @@ -0,0 +1,5 @@ +{ + "first_name": "John", + "last_name": "Doe", + "question": "Who is the most famous person in the world?" +} diff --git a/examples/prompty/chat-basic/README.md b/examples/prompty/chat-basic/README.md new file mode 100644 index 00000000000..54b2440fcd8 --- /dev/null +++ b/examples/prompty/chat-basic/README.md @@ -0,0 +1,99 @@ +# Basic chat +A prompt that uses the chat API to answer questions with chat history, leveraging a promptflow connection. + + +## Prerequisites + +Install `promptflow-devkit`: +```bash +pip install promptflow-devkit +``` + +## What you will learn + +In this flow, you will learn +- how to compose a chat flow. +- the prompt template format of the chat API. The message delimiter is a separate line containing the role name and a colon: "system:", "user:", "assistant:". +See OpenAI Chat for more about message roles. + ```jinja + system: + You are a chatbot having a conversation with a human. + + user: + {{question}} + ``` +- how to consume chat history in the prompt. + ```jinja + {% for item in chat_history %} + {{item.role}}: + {{item.content}} + {% endfor %} + ``` + +## Getting started + +### Create connection for prompty to use +Go to the "Prompt flow" "Connections" tab. Click the "Create" button, select one of the connection types supported by the LLM tool, and fill in the configurations. + +Currently, there are two connection types supported by the LLM tool: "AzureOpenAI" and "OpenAI". If you want to use the "AzureOpenAI" connection type, you need to create an Azure OpenAI service first. Please refer to [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service/) for more details. If you want to use the "OpenAI" connection type, you need to create an OpenAI account first. Please refer to [OpenAI](https://platform.openai.com/) for more details.
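+
+You can also check for the connection from Python. A small sketch using the local client (assuming `promptflow-devkit` is installed):
+
+```python
+from promptflow.client import PFClient
+
+pf = PFClient()
+# raises an error if the connection has not been created yet
+conn = pf.connections.get(name="open_ai_connection")
+print(conn)
+```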
+ +- Note: you need the new [gpt-35-turbo (0125) version](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the json_object response_format feature. + + +```bash +# Override keys with --set to avoid yaml file changes +pf connection create --file ../../connections/azure_openai.yml --set api_key= api_base= +``` + +Note that in [chat.prompty](chat.prompty) we are using a connection named `open_ai_connection`. +```bash +# show registered connection +pf connection show --name open_ai_connection +``` + +## Run prompty + +- Test flow: single turn +```bash +# run chat flow with default question in chat.prompty (TODO) +# pf flow test --flow chat.prompty + +# run chat flow with new question +pf flow test --flow chat.prompty --inputs question="What's Azure Machine Learning?" + +# run chat flow with sample.json +pf flow test --flow chat.prompty --inputs sample.json +``` + +- Test flow: multi turn +```powershell +# start test in interactive terminal (TODO) +pf flow test --flow chat.prompty --interactive + +# start test in chat ui (TODO) +pf flow test --flow chat.prompty --ui +``` + +- Create run with multiple lines data +```bash +# create a run with the data file +pf run create --flow chat.prompty --data ./data.jsonl --column-mapping question='${data.question}' --stream +``` + +You can also skip providing `column-mapping` if the provided data has the same column names as the flow inputs. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in the CLI. + +- List and show run meta +```bash +# list created run +pf run list + +# get a sample run name + +name=$(pf run list -r 10 | jq '.[] | select(.name | contains("chat_basic_")) | .name'| head -n 1 | tr -d '"') +# show specific run detail +pf run show --name $name + +# show output +pf run show-details --name $name +``` \ No newline at end of file diff --git a/examples/prompty/chat-basic/chat-with-prompty.ipynb b/examples/prompty/chat-basic/chat-with-prompty.ipynb new file mode 100644 index 00000000000..40fd9e4e498 --- /dev/null +++ b/examples/prompty/chat-basic/chat-with-prompty.ipynb @@ -0,0 +1,315 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Chat with prompty\n", + "\n", + "**Learning Objectives** - Upon completing this tutorial, you should be able to:\n", + "\n", + "- Write an LLM application using prompty and visualize the trace of your application.\n", + "- Understand how to handle chat conversations using prompty.\n", + "- Batch run a prompty against multiple lines of data.\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. Install dependent packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture --no-stderr\n", + "%pip install promptflow-devkit" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Prompty\n", + "\n", + "Prompty is a file with the .prompty extension for developing prompt templates. \n", + "The prompty asset is a markdown file with a modified front matter. \n", + "The front matter is in yaml format and contains a number of metadata fields which define the model configuration and the expected inputs of the prompty.\n",
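+ "\n", + "For instance, the front matter of [chat.prompty](chat.prompty), printed in full by the next cell, starts roughly like this sketch (field values follow this sample's setup):\n", + "```yaml\n", + "name: Chat Prompt\n", + "model:\n", + "  api: chat\n", + "  configuration:\n", + "    type: azure_openai\n", + "    connection: open_ai_connection\n", + "    azure_deployment: gpt-35-turbo-0125\n", + "```\n"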
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "with open(\"chat.prompty\") as fin:\n", + " print(fin.read())" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Create necessary connections\n", + "A connection helps securely store and manage secret keys or other sensitive credentials required for interacting with LLMs and other external tools, for example Azure Content Safety.\n", + "\n", + "The prompty above uses the connection `open_ai_connection` inside, so we need to set up the connection if we haven't added it before. Once created, it's stored in a local db and can be used in any flow.\n", + "\n", + "Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.\n", + "\n", + "Note: you need the new [gpt-35-turbo (0125) version](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the json_object response_format feature." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.client import PFClient\n", + "from promptflow.connections import AzureOpenAIConnection, OpenAIConnection\n", + "\n", + "# client can help manage your runs and connections.\n", + "pf = PFClient()\n", + "try:\n", + " conn_name = \"open_ai_connection\"\n", + " conn = pf.connections.get(name=conn_name)\n", + " print(\"using existing connection\")\n", + "except:\n", + " # Follow https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal to create an Azure Open AI resource.\n", + " connection = AzureOpenAIConnection(\n", + " name=conn_name,\n", + " api_key=\"\",\n", + " api_base=\"\",\n", + " api_type=\"azure\",\n", + " )\n", + "\n", + " # use this if you have an existing OpenAI account\n", + " # connection = OpenAIConnection(\n", + " # name=conn_name,\n", + " # api_key=\"\",\n", + " # )\n", + "\n", + " conn = pf.connections.create_or_update(connection)\n", + " print(\"successfully created connection\")\n", + "\n", + "print(conn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Execute prompty as function" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.core import Flow\n", + "\n", + "# load prompty as a flow\n", + "f = Flow.load(\"chat.prompty\")\n", + "# execute the flow as function\n", + "question = \"What is the capital of France?\"\n", + "result = f(first_name=\"John\", last_name=\"Doe\", question=question)\n", + "result" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Visualize trace by using start_trace" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.tracing import start_trace\n", + "\n", + "# start a trace session, and print a url for user to check trace\n", + "start_trace()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Re-running the cell below will collect a trace in the trace UI."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# rerun the function, which will be recorded in the trace\n",
+    "result = f(first_name=\"John\", last_name=\"Doe\", question=question)\n",
+    "result"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Evaluate the result\n",
+    "\n",
+    "In this example, we will use a prompt that determines whether a chat conversation contains an apology from the assistant."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "eval_prompty = \"../eval-apology/apology.prompty\"\n",
+    "\n",
+    "with open(eval_prompty) as fin:\n",
+    "    print(fin.read())"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# load prompty as a flow\n",
+    "eval_flow = Flow.load(eval_prompty)\n",
+    "# execute the flow as a function\n",
+    "result = eval_flow(question=question, answer=result[\"answer\"], messages=[])\n",
+    "result"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2. Batch run with multi-line data\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from promptflow.client import PFClient\n",
+    "\n",
+    "flow = \"chat.prompty\"  # path to the prompty file\n",
+    "data = \"./data.jsonl\"  # path to the data file\n",
+    "\n",
+    "# create a run with the flow and data\n",
+    "pf = PFClient()\n",
+    "base_run = pf.run(\n",
+    "    flow=flow,\n",
+    "    data=data,\n",
+    "    stream=True,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "details = pf.get_details(base_run)\n",
+    "details.head(10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 3. Evaluate your prompty\n",
+    "You can then use an evaluation prompty to evaluate your prompty."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "### Run evaluation on the previous batch run\n",
+    "The **base_run** is the batch run we completed in step 2 above, for the chat prompty with \"data.jsonl\" as input."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "eval_run = pf.run(\n",
+    "    flow=eval_prompty,\n",
+    "    data=\"./data.jsonl\",  # path to the data file\n",
+    "    run=base_run,  # specify base_run as the run you want to evaluate\n",
+    "    column_mapping={\n",
+    "        \"messages\": \"${data.chat_history}\",\n",
+    "        \"question\": \"${data.question}\",\n",
+    "        \"answer\": \"${run.outputs.answer}\",\n",
+    "    },\n",
+    "    stream=True,\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "details = pf.get_details(eval_run)\n",
+    "details.head(10)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Next Steps\n",
+    "\n",
+    "By now you've successfully run your first prompt flow and even evaluated it. That's great!\n",
+    "\n",
+    "You can check out more [Prompty Examples](../README.md).\n",
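+    "\n",
+    "If you'd like to aggregate the evaluation results into a single metric, the details DataFrame can be post-processed with pandas. A minimal sketch (assuming the apology score surfaces as an `outputs.score` column; the exact column name depends on how the prompty outputs are mapped):\n",
+    "\n",
+    "```python\n",
+    "# hypothetical aggregation over the eval run details\n",
+    "scores = details[\"outputs.score\"].astype(int)\n",
+    "print(f\"apology rate: {scores.mean():.2%}\")\n",
+    "```"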
+   ]
+  }
+ ],
+ "metadata": {
+  "description": "A quickstart tutorial to run a chat prompty and evaluate it.",
+  "kernelspec": {
+   "display_name": "prompt_flow",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.18"
+  },
+  "resources": "examples/requirements.txt, examples/prompty/chat-basic, examples/prompty/eval-apology"
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/prompty/chat-basic/chat.prompty b/examples/prompty/chat-basic/chat.prompty
new file mode 100644
index 00000000000..4ae7aaff078
--- /dev/null
+++ b/examples/prompty/chat-basic/chat.prompty
@@ -0,0 +1,53 @@
+---
+name: Chat Prompt
+description: A basic prompt that uses the chat API to answer questions with chat_history
+model:
+  api: chat
+  configuration:
+    type: azure_openai
+    connection: open_ai_connection
+    azure_deployment: gpt-35-turbo-0125
+  parameters:
+    max_tokens: 256
+    temperature: 0.2
+    response_format:
+      type: json_object
+inputs:
+  first_name:
+    type: string
+    default: "Jane"
+  last_name:
+    type: string
+    default: "Doe"
+  question:
+    type: string
+  chat_history:
+    type: list
+    default: []
+outputs:
+  answer:
+    type: string
+
+sample:
+  first_name: Jane
+  last_name: Doe
+  question: What is the meaning of life?
+  chat_history: []
+
+---
+system:
+You are an AI assistant who helps people find information.
+As the assistant, you answer questions briefly, succinctly,
+and in a personable manner using markdown and even add some personal flair with appropriate emojis.
+Only reply in JSON format, like below: {"answer": the answer content}
+
+You are helping {{first_name}} {{last_name}} to find answers to their questions.
+Use their name to address them in your responses.
+
+{% for item in chat_history %}
+{{item.role}}:
+{{item.content}}
+{% endfor %}
+
+user:
+{{question}}
\ No newline at end of file
diff --git a/examples/prompty/chat-basic/data.jsonl b/examples/prompty/chat-basic/data.jsonl
new file mode 100644
index 00000000000..2578f7e65ea
--- /dev/null
+++ b/examples/prompty/chat-basic/data.jsonl
@@ -0,0 +1,3 @@
+{"first_name": "John", "last_name": "Doe", "question": "What's chat-GPT?", "chat_history": []}
+{"first_name": "John", "last_name": "Doe", "question": "How many questions did John Doe ask?", "chat_history": []}
+{"first_name": "John", "last_name": "Doe", "question": "How many questions did John Doe ask?", "chat_history": [{"role": "user","content": "where is the nearest coffee shop?"},{"role": "assistant","content": "I'm sorry, I don't know that. Would you like me to look it up for you?"}]}
\ No newline at end of file
diff --git a/examples/prompty/chat-basic/sample.json b/examples/prompty/chat-basic/sample.json
new file mode 100644
index 00000000000..beae3ebfbc7
--- /dev/null
+++ b/examples/prompty/chat-basic/sample.json
@@ -0,0 +1,23 @@
+{
+    "first_name": "Jane",
+    "last_name": "Doe",
+    "question": "How many questions did the user ask?",
+    "chat_history": [
+        {
+            "role": "user",
+            "content": "where is the nearest coffee shop?"
+        },
+        {
+            "role": "assistant",
+            "content": "I'm sorry, I don't know that. Would you like me to look it up for you?"
+        },
+        {
+            "role": "user",
+            "content": "what's the capital of France?"
+        },
+        {
+            "role": "assistant",
+            "content": "Paris"
+        }
+    ]
+}
diff --git a/examples/prompty/eval-apology/README.md b/examples/prompty/eval-apology/README.md
new file mode 100644
index 00000000000..f367e070d51
--- /dev/null
+++ b/examples/prompty/eval-apology/README.md
@@ -0,0 +1,45 @@
+# Apology
+A prompt that determines whether a chat conversation contains an apology from the assistant.
+
+## Prerequisites
+
+Install `promptflow-devkit`:
+```bash
+pip install promptflow-devkit
+```
+
+### Create connection for prompty to use
+Go to the "Prompt flow" "Connections" tab. Click the "Create" button, select one of the connection types supported by the LLM tool, and fill in the configurations.
+
+Currently, there are two connection types supported by the LLM tool: "AzureOpenAI" and "OpenAI". If you want to use the "AzureOpenAI" connection type, you need to create an Azure OpenAI service first. Please refer to [Azure OpenAI Service](https://azure.microsoft.com/en-us/products/cognitive-services/openai-service/) for more details. If you want to use the "OpenAI" connection type, you need to create an OpenAI account first. Please refer to [OpenAI](https://platform.openai.com/) for more details.
+
+Note: you need the new [gpt-35-turbo (0125) version](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the json_object response_format feature.
+
+```bash
+# Override keys with --set to avoid yaml file changes
+pf connection create --file ../../connections/azure_openai.yml --set api_key= api_base=
+```
+
+Note that [apology.prompty](apology.prompty) uses the connection named `open_ai_connection`.
+```bash
+# show registered connection
+pf connection show --name open_ai_connection
+```
+
+## Run prompty
+
+- Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.
+
+- Set up environment variables
+
+Ensure you have put your Azure OpenAI endpoint and key in the [.env](../.env) file. You can create one by referring to this [example file](../.env.example).
+
+```bash
+cat ../.env
+```
+
+- Test flow
+```bash
+# sample.json contains a messages field holding the chat conversation.
+pf flow test --flow apology.prompty --inputs sample.json
+```
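+
+To sanity-check the opposite case, you can also test with the provided no-apology sample (a quick check; it should yield a score of 0, since [sample_no_apology.json](sample_no_apology.json) contains no apology):
+```bash
+pf flow test --flow apology.prompty --inputs sample_no_apology.json
+```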
diff --git a/examples/prompty/eval-apology/apology.prompty b/examples/prompty/eval-apology/apology.prompty
new file mode 100644
index 00000000000..9cd6e0a5951
--- /dev/null
+++ b/examples/prompty/eval-apology/apology.prompty
@@ -0,0 +1,43 @@
+---
+name: Apology Prompt
+description: A prompt that determines whether a chat conversation contains an apology from the assistant
+model:
+  api: chat
+  configuration:
+    type: azure_openai
+    connection: open_ai_connection
+    azure_deployment: gpt-35-turbo-0125
+  parameters:
+    temperature: 0.2
+    response_format: { "type": "json_object" }
+inputs:
+  question:
+    type: string
+  answer:
+    type: string
+  messages:
+    type: list
+sample: sample.json
+---
+
+system:
+You are an AI tool that determines whether, in a chat conversation, the assistant apologized, for example by saying sorry.
+Only provide a response of {"score": 0} or {"score": 1} so that the output is valid JSON.
+Give a score of 1 if the assistant apologized in the chat conversation.
+
+Here are some examples of chat conversations and the correct response:
+
+**Example 1**
+user: Where can I get my car fixed?
+assistant: I'm sorry, I don't know that. Would you like me to look it up for you?
+result:
+{"score": 1}
+
+**Here is the actual conversation to be scored:**
+{% for message in messages %}
+{{ message.role }}: {{ message.content}}
+{% endfor %}
+user: {{question}}
+assistant: {{answer}}
+
+**result**
\ No newline at end of file
diff --git a/examples/prompty/eval-apology/sample.json b/examples/prompty/eval-apology/sample.json
new file mode 100644
index 00000000000..90640002c6e
--- /dev/null
+++ b/examples/prompty/eval-apology/sample.json
@@ -0,0 +1,14 @@
+{
+    "messages": [
+        {
+            "role": "user",
+            "content": "where is the nearest coffee shop?"
+        },
+        {
+            "role": "assistant",
+            "content": "I'm sorry, I don't know that. Would you like me to look it up for you?"
+        }
+    ],
+    "question": "How many questions did John Doe ask?",
+    "answer": "1 question."
+}
\ No newline at end of file
diff --git a/examples/prompty/eval-apology/sample_no_apology.json b/examples/prompty/eval-apology/sample_no_apology.json
new file mode 100644
index 00000000000..958f05f3b93
--- /dev/null
+++ b/examples/prompty/eval-apology/sample_no_apology.json
@@ -0,0 +1,5 @@
+{
+    "messages": [],
+    "question": "where is the nearest coffee shop?",
+    "answer": "It's at the end of the street."
+}
\ No newline at end of file
diff --git a/examples/prompty/eval-basic/README.md b/examples/prompty/eval-basic/README.md
new file mode 100644
index 00000000000..9175c98a7c9
--- /dev/null
+++ b/examples/prompty/eval-basic/README.md
@@ -0,0 +1,28 @@
+# Basic Eval
+A basic evaluator prompt for the QA scenario
+
+## Prerequisites
+
+Install `promptflow-devkit`:
+```bash
+pip install promptflow-devkit
+```
+
+## Run prompty
+
+- Prepare your Azure OpenAI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one.
+- Note: you need the new [gpt-35-turbo (0125) version](https://learn.microsoft.com/en-us/azure/ai-services/openai/concepts/models#gpt-35-models) to use the json_object response_format feature.
+- Set up environment variables
+
+Ensure you have put your Azure OpenAI endpoint and key in the [.env](../.env) file. You can create one by referring to this [example file](../.env.example).
+
+```bash
+cat ../.env
+# export .env entries as environment variables
+export $(grep -v '^#' ../.env | xargs)
+```
+
+- Test flow
+```bash
+pf flow test --flow eval.prompty --inputs sample.json
+```
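+
+- Use prompty from Python
+
+You can also load the evaluator and call it as a function (a sketch, assuming the `.env` file above provides the `AZURE_OPENAI_API_KEY` and `AZURE_OPENAI_ENDPOINT` variables that [eval.prompty](eval.prompty) reads via `${env:...}`):
+```python
+from dotenv import load_dotenv
+
+from promptflow.core import Flow
+
+# export the variables referenced in eval.prompty's configuration
+load_dotenv("../.env")
+
+# load the prompty as a flow and execute it as a function
+eval_flow = Flow.load("eval.prompty")
+result = eval_flow(
+    question="what's the capital of China?",
+    answer="Shanghai",
+    ground_truth="Beijing",
+)
+print(result)
+```
\ No newline at end of file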
diff --git a/examples/prompty/eval-basic/eval.prompty b/examples/prompty/eval-basic/eval.prompty
new file mode 100644
index 00000000000..c74d2eb0a75
--- /dev/null
+++ b/examples/prompty/eval-basic/eval.prompty
@@ -0,0 +1,44 @@
+---
+name: basic evaluate
+description: basic evaluator for QA scenario
+model:
+  api: chat
+  configuration:
+    type: azure_openai
+    azure_deployment: gpt-35-turbo-0125
+    api_key: ${env:AZURE_OPENAI_API_KEY}
+    azure_endpoint: ${env:AZURE_OPENAI_ENDPOINT}
+  parameters:
+    temperature: 0.2
+    max_tokens: 200
+    top_p: 1.0
+    response_format:
+      type: json_object
+
+inputs:
+  question:
+    type: string
+  answer:
+    type: string
+  ground_truth:
+    type: string
+
+---
+system:
+You are an AI assistant.
+Your task is to give a score for the answer based on the ground_truth and the original question.
+The score value should always be an integer between 1 and 5, so the score produced should be 1, 2, 3, 4 or 5.
+The output should be valid JSON.
+
+**Example**
+question: "What is the capital of France?"
+answer: "Paris"
+ground_truth: "Paris"
+output:
+{"score": "5", "explanation": "Paris is the capital of France"}
+
+user:
+question: {{question}}
+answer: {{answer}}
+ground_truth: {{ground_truth}}
+output:
\ No newline at end of file
diff --git a/examples/prompty/eval-basic/sample.json b/examples/prompty/eval-basic/sample.json
new file mode 100644
index 00000000000..a180bf6acdd
--- /dev/null
+++ b/examples/prompty/eval-basic/sample.json
@@ -0,0 +1,5 @@
+{
+    "question": "what's the capital of China?",
+    "answer": "Shanghai",
+    "ground_truth": "Beijing"
+}
\ No newline at end of file
diff --git a/examples/requirements.txt b/examples/requirements.txt
index 21832de3282..f7ca3edd40c 100644
--- a/examples/requirements.txt
+++ b/examples/requirements.txt
@@ -1,4 +1,4 @@
-promptflow[azure]
+promptflow[azure]==1.9.0
 promptflow-tools
 python-dotenv
 bs4
diff --git a/examples/tutorials/tracing/.env.example b/examples/tutorials/tracing/.env.example
new file mode 100644
index 00000000000..27aef734264
--- /dev/null
+++ b/examples/tutorials/tracing/.env.example
@@ -0,0 +1,3 @@
+CHAT_DEPLOYMENT_NAME=gpt-35-turbo
+AZURE_OPENAI_API_KEY=
+AZURE_OPENAI_ENDPOINT=
diff --git a/examples/tutorials/tracing/README.md b/examples/tutorials/tracing/README.md
new file mode 100644
index 00000000000..b9831a809f5
--- /dev/null
+++ b/examples/tutorials/tracing/README.md
@@ -0,0 +1,112 @@
+---
+resources: examples/tutorials/tracing/
+---
+
+## Tracing
+
+Prompt flow provides a tracing feature to capture and visualize the internal execution details of all flows.
+
+For a `DAG flow`, users can track and visualize the node-level inputs/outputs of a flow execution, which provides critical insights for developers to understand the internal details of the execution.
+
+For `Flex flow` developers, who might use different frameworks (LangChain, Semantic Kernel, OpenAI, various kinds of agents) to create LLM-based applications, prompt flow allows them to instrument their code in an [OpenTelemetry](https://opentelemetry.io/)-compatible way and visualize it using the UI provided by the promptflow devkit.
+
+## Instrumenting user's code
+#### **`start_trace()` to enable trace for LLM calls**
+Let's start with the simplest example: add a single line of code to enable tracing for the LLM calls in your application.
+```python
+from openai import OpenAI
+from promptflow.tracing import start_trace
+
+# start_trace() will print a url for trace detail visualization
+start_trace()
+
+client = OpenAI()
+
+completion = client.chat.completions.create(
+    model="gpt-4",
+    messages=[
+        {"role": "system", "content": "You are a poetic assistant, skilled in explaining complex programming concepts with creative flair."},
+        {"role": "user", "content": "Compose a poem that explains the concept of recursion in programming."}
+    ]
+)
+
+print(completion.choices[0].message)
+```
+
+With the trace URL, you will see a trace list corresponding to each LLM call:
+![LLM-trace-list](../../../docs/media/trace/LLM-trace-list.png)
+
+Click on a line record, and the LLM detail will be displayed with a chat window experience, together with other LLM call params:
+![LLM-trace-detail](../../../docs/media/trace/LLM-trace-detail.png)
+
+More examples of adding trace for [autogen](https://microsoft.github.io/autogen/) and [langchain](https://python.langchain.com/docs/get_started/introduction/):
+
+1. **[Add trace for Autogen](./autogen-groupchat/)**
+
+![autogen-trace-detail](../../../docs/media/trace/autogen-trace-detail.png)
+
+2. **[Add trace for Langchain](./langchain)**
+
+![langchain-trace-detail](../../../docs/media/trace/langchain-trace-detail.png)
+
+#### **`@trace` to trace any function**
+A more common scenario is an application with a complicated code structure, where developers would like to add traces on the critical paths they want to debug and monitor.
+
+See the **[math_to_code](./math_to_code.py)** example.
+
+```python
+from promptflow.tracing import trace
+# trace your function
+@trace
+def code_gen(client: AzureOpenAI, question: str) -> str:
+    sys_prompt = (
+        "I want you to act as a Math expert specializing in Algebra, Geometry, and Calculus. "
+        "Given the question, develop python code to model the user's question. "
+        "Make sure only reply the executable code, no other words."
+    )
+    completion = client.chat.completions.create(
+        model=os.getenv("CHAT_DEPLOYMENT_NAME", "gpt-35-turbo"),
+        messages=[
+            {
+                "role": "system",
+                "content": sys_prompt,
+            },
+            {"role": "user", "content": question},
+        ],
+    )
+    raw_code = completion.choices[0].message.content
+    result = code_refine(raw_code)
+    return result
+```
+
+Executing the command below will print a URL that displays the trace records and trace details of each test.
+
+```bash
+python math_to_code.py
+```
+
+## Trace visualization in flow test and batch run
+### Flow test
+
+If your application is created with a DAG flow, tracing is automatically enabled for all flow tests and batch runs. Take **[chat_with_pdf](../../flows/chat/chat-with-pdf/)** as an example.
+
+Run `pf flow test --flow .`; each flow test will generate a single line in the trace UI:
+![flow-trace-record](../../../docs/media/trace/flow-trace-records.png)
+
+Click a record, and the trace details will be visualized as a tree view.
+
+![flow-trace-detail](../../../docs/media/trace/flow-trace-detail.png)
+
+### Evaluate against batch data
+Continuing with **[chat_with_pdf](../../flows/chat/chat-with-pdf/)** as the example, you can trigger a batch run with the commands below:
+
+```shell
+pf run create -f batch_run.yaml
+```
+Or
+```shell
+pf run create --flow . --data "./data/bert-paper-qna.jsonl" --column-mapping chat_history='${data.chat_history}' pdf_url='${data.pdf_url}' question='${data.question}'
+```
+Then you will get a run-related trace URL, e.g. http://localhost:52008/v1.0/ui/traces?run=chat_with_pdf_variant_0_20240226_181222_219335
+
+![batch_run_record](../../../docs/media/trace/batch_run_record.png)
\ No newline at end of file
diff --git a/examples/tutorials/tracing/autogen-groupchat/.gitignore b/examples/tutorials/tracing/autogen-groupchat/.gitignore
new file mode 100644
index 00000000000..66c8ac382bd
--- /dev/null
+++ b/examples/tutorials/tracing/autogen-groupchat/.gitignore
@@ -0,0 +1,2 @@
+OAI_CONFIG_LIST.json
+groupchat
\ No newline at end of file
diff --git a/examples/tutorials/tracing/autogen-groupchat/OAI_CONFIG_LIST.json.example b/examples/tutorials/tracing/autogen-groupchat/OAI_CONFIG_LIST.json.example
new file mode 100644
index 00000000000..62d8e334482
--- /dev/null
+++ b/examples/tutorials/tracing/autogen-groupchat/OAI_CONFIG_LIST.json.example
@@ -0,0 +1,16 @@
+[
+    {
+        "model": "gpt-4",
+        "api_key": "",
+        "base_url": "",
+        "api_type": "azure",
+        "api_version": "2023-06-01-preview"
+    },
+    {
+        "model": "gpt-35-turbo",
+        "api_key": "",
+        "base_url": "",
+        "api_type": "azure",
+        "api_version": "2023-06-01-preview"
+    }
+]
\ No newline at end of file
diff --git a/examples/tutorials/tracing/autogen-groupchat/README.md b/examples/tutorials/tracing/autogen-groupchat/README.md
new file mode 100644
index 00000000000..ef405b1909b
--- /dev/null
+++ b/examples/tutorials/tracing/autogen-groupchat/README.md
@@ -0,0 +1,6 @@
+# Tracing existing application using promptflow: Auto Generated Agent Group Chat
+
+AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.
+Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).
+
+Check out this [notebook](./trace-autogen-groupchat.ipynb) for an example.
\ No newline at end of file
diff --git a/examples/tutorials/tracing/autogen-groupchat/requirements.txt b/examples/tutorials/tracing/autogen-groupchat/requirements.txt
new file mode 100644
index 00000000000..61335f84893
--- /dev/null
+++ b/examples/tutorials/tracing/autogen-groupchat/requirements.txt
@@ -0,0 +1,3 @@
+promptflow
+pyautogen>=0.2.9
+pydantic>=2.6.0
\ No newline at end of file
diff --git a/examples/tutorials/tracing/autogen-groupchat/trace-autogen-groupchat.ipynb b/examples/tutorials/tracing/autogen-groupchat/trace-autogen-groupchat.ipynb
new file mode 100644
index 00000000000..cbbf9d91a79
--- /dev/null
+++ b/examples/tutorials/tracing/autogen-groupchat/trace-autogen-groupchat.ipynb
@@ -0,0 +1,218 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Tracing existing application using promptflow: Auto Generated Agent Group Chat\n",
+    "\n",
+    "AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n",
+    "Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
+    "\n",
+    "This notebook is adapted from https://github.com/microsoft/autogen/blob/main/notebook/agentchat_groupchat.ipynb.\n",
\n", + "\n", + "**Learning Objectives** - Upon completing this tutorial, you should be able to:\n", + "\n", + "- Trace LLM (OpenAI) Calls and visualize the trace of your application.\n", + "\n", + "## Requirements\n", + "\n", + "AutoGen requires `Python>=3.8`. To run this notebook example, please install required dependencies:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%%capture --no-stderr\n", + "%pip install -r ./requirements.txt" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Set your API Endpoint\n", + "\n", + "You can create the config file named [OAI_CONFIG_LIST.json](OAI_CONFIG_LIST.json) from example file: [OAI_CONFIG_LIST.json.example](OAI_CONFIG_LIST.json.example).\n", + "\n", + "Below code use the [`config_list_from_json`](https://microsoft.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file. \n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import autogen\n", + "\n", + "# please ensure you have a json config file\n", + "env_or_file = \"OAI_CONFIG_LIST.json\"\n", + "\n", + "# filters the configs by models (you can filter by other keys as well). Only the gpt-4 models are kept in the list based on the filter condition.\n", + "\n", + "# gpt4\n", + "# config_list = autogen.config_list_from_json(\n", + "# env_or_file,\n", + "# filter_dict={\n", + "# \"model\": [\"gpt-4\", \"gpt-4-0314\", \"gpt4\", \"gpt-4-32k\", \"gpt-4-32k-0314\", \"gpt-4-32k-v0314\"],\n", + "# },\n", + "# )\n", + "\n", + "# gpt35\n", + "config_list = autogen.config_list_from_json(\n", + " env_or_file,\n", + " filter_dict={\n", + " \"model\": {\n", + " \"gpt-35-turbo\",\n", + " \"gpt-3.5-turbo\",\n", + " \"gpt-3.5-turbo-16k\",\n", + " \"gpt-3.5-turbo-0301\",\n", + " \"chatgpt-35-turbo-0301\",\n", + " \"gpt-35-turbo-v0301\",\n", + " },\n", + " },\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Construct Agents" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "os.environ[\"AUTOGEN_USE_DOCKER\"] = \"False\"\n", + "\n", + "llm_config = {\"config_list\": config_list, \"cache_seed\": 42}\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"User_proxy\",\n", + " system_message=\"A human admin.\",\n", + " code_execution_config={\n", + " \"last_n_messages\": 2,\n", + " \"work_dir\": \"groupchat\",\n", + " \"use_docker\": False,\n", + " }, # Please set use_docker=True if docker is available to run the generated code. 
+    "    human_input_mode=\"TERMINATE\",\n",
+    ")\n",
+    "coder = autogen.AssistantAgent(\n",
+    "    name=\"Coder\",\n",
+    "    llm_config=llm_config,\n",
+    ")\n",
+    "pm = autogen.AssistantAgent(\n",
+    "    name=\"Product_manager\",\n",
+    "    system_message=\"Creative in software product ideas.\",\n",
+    "    llm_config=llm_config,\n",
+    ")\n",
+    "groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)\n",
+    "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Start Chat with promptflow trace"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from promptflow.tracing import start_trace\n",
+    "\n",
+    "# start a trace session, and print a url for user to check trace\n",
+    "# traces will be collected into below collection name\n",
+    "start_trace(collection=\"autogen-groupchat\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Open the URL printed in the `start_trace` output; when running the code below, you will be able to see new traces in the UI."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from opentelemetry import trace\n",
+    "import json\n",
+    "\n",
+    "\n",
+    "tracer = trace.get_tracer(\"my_tracer\")\n",
+    "# Create a root span\n",
+    "with tracer.start_as_current_span(\"autogen\") as span:\n",
+    "    message = \"Find a latest paper about gpt-4 on arxiv and find its potential applications in software.\"\n",
+    "    user_proxy.initiate_chat(\n",
+    "        manager,\n",
+    "        message=message,\n",
+    "        clear_history=True,\n",
+    "    )\n",
+    "    span.set_attribute(\"custom\", \"custom attribute value\")\n",
+    "    # recommend to store inputs and outputs as events\n",
+    "    span.add_event(\n",
+    "        \"promptflow.function.inputs\", {\"payload\": json.dumps(dict(message=message))}\n",
+    "    )\n",
+    "    span.add_event(\n",
+    "        \"promptflow.function.output\", {\"payload\": json.dumps(user_proxy.last_message())}\n",
+    "    )\n",
+    "# type exit to terminate the chat"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Next Steps\n",
+    "\n",
+    "By now you've successfully traced the LLM calls in your app using prompt flow.\n",
+    "\n",
+    "You can check out more examples:\n",
+    "- [Trace your flow](../../../flex-flows/basic/quickstart.ipynb): use the promptflow `@trace` decorator to structurally trace your app and evaluate it with a batch run."
+   ]
+  }
+ ],
+ "metadata": {
+  "description": "Tracing LLM calls in autogen group chat application",
+  "kernelspec": {
+   "display_name": "prompt_flow",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.13"
+  },
+  "resources": ""
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/tutorials/tracing/custom-otlp-collector/llm.py b/examples/tutorials/tracing/custom-otlp-collector/llm.py
new file mode 100644
index 00000000000..cdc72f92fe8
--- /dev/null
+++ b/examples/tutorials/tracing/custom-otlp-collector/llm.py
@@ -0,0 +1,51 @@
+import os
+
+from dotenv import load_dotenv
+from openai.version import VERSION as OPENAI_VERSION
+
+from promptflow.tracing import trace
+
+
+def get_client():
+    if OPENAI_VERSION.startswith("0."):
+        raise Exception(
+            "Please upgrade your OpenAI package to version >= 1.0.0 using the command: pip install --upgrade openai."
+        )
+    api_key = os.environ.get("OPENAI_API_KEY", None)
+    if api_key:
+        from openai import OpenAI
+
+        return OpenAI()
+    else:
+        from openai import AzureOpenAI
+
+        return AzureOpenAI(api_version=os.environ.get("OPENAI_API_VERSION", "2023-07-01-preview"))
+
+
+@trace
+def my_llm_tool(prompt: str, deployment_name: str) -> str:
+    if "OPENAI_API_KEY" not in os.environ and "AZURE_OPENAI_API_KEY" not in os.environ:
+        # load environment variables from .env file
+        load_dotenv()
+
+    if "OPENAI_API_KEY" not in os.environ and "AZURE_OPENAI_API_KEY" not in os.environ:
+        raise Exception("Please specify environment variables: OPENAI_API_KEY or AZURE_OPENAI_API_KEY")
+    messages = [
+        {"role": "system", "content": "You are a helpful assistant."},
+        {"role": "user", "content": prompt},
+    ]
+    response = get_client().chat.completions.create(
+        messages=messages,
+        model=deployment_name,
+    )
+
+    # get first element because prompt is single.
+    return response.choices[0].message.content
+
+
+if __name__ == "__main__":
+    result = my_llm_tool(
+        prompt="Write a simple Hello, world! program that displays the greeting message.",
+        deployment_name="text-davinci-003",
+    )
+    print(result)
diff --git a/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb b/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb
new file mode 100644
index 00000000000..291d254191d
--- /dev/null
+++ b/examples/tutorials/tracing/custom-otlp-collector/otlp-trace-collector.ipynb
@@ -0,0 +1,191 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Tracing with Custom OpenTelemetry Collector\n",
+    "\n",
+    "In certain scenarios you might want to use your own [OpenTelemetry Collector](https://opentelemetry.io/docs/collector/) and keep your dependencies minimal.\n",
+    "\n",
+    "In such cases you can avoid the dependency on [promptflow-devkit](https://pypi.org/project/promptflow-devkit/), which provides the default collector from promptflow, and depend only on [promptflow-tracing](https://pypi.org/project/promptflow-tracing).\n",
+    "\n",
+    "**Learning Objectives** - Upon completing this tutorial, you should be able to:\n",
+    "\n",
+    "- Trace LLM (OpenAI) calls using a custom OpenTelemetry collector.\n",
+    "\n",
+    "## 0. Install dependent packages"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%capture --no-stderr\n",
+    "%pip install -r ./requirements.txt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 1. Set up an OpenTelemetry collector\n",
+    "\n",
+    "Implement a simple collector that prints the traces to stdout."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import threading\n",
+    "from http.server import BaseHTTPRequestHandler, HTTPServer\n",
+    "\n",
+    "from opentelemetry.proto.collector.trace.v1.trace_service_pb2 import (\n",
+    "    ExportTraceServiceRequest,\n",
+    ")\n",
+    "\n",
+    "\n",
+    "class OTLPCollector(BaseHTTPRequestHandler):\n",
+    "    def do_POST(self):\n",
+    "        content_length = int(self.headers[\"Content-Length\"])\n",
+    "        post_data = self.rfile.read(content_length)\n",
+    "\n",
+    "        traces_request = ExportTraceServiceRequest()\n",
+    "        traces_request.ParseFromString(post_data)\n",
+    "\n",
+    "        print(\"Received a POST request with data:\")\n",
+    "        print(traces_request)\n",
+    "\n",
+    "        self.send_response(200, \"Traces received\")\n",
+    "        self.end_headers()\n",
+    "        self.wfile.write(b\"Data received and printed to stdout.\\n\")\n",
+    "\n",
+    "\n",
+    "def run_server(port: int):\n",
+    "    server_address = (\"\", port)\n",
+    "    httpd = HTTPServer(server_address, OTLPCollector)\n",
+    "    httpd.serve_forever()\n",
+    "\n",
+    "\n",
+    "def start_server(port: int):\n",
+    "    server_thread = threading.Thread(target=run_server, args=(port,))\n",
+    "    server_thread.daemon = True\n",
+    "    server_thread.start()\n",
+    "    print(f\"Server started on port {port}. Access http://localhost:{port}/\")\n",
+    "    return server_thread"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# invoke the collector service, serving on the OTLP port\n",
+    "start_server(port=4318)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## 2. Trace your application with tracing\n",
+    "Assume we already have a Python function that calls the OpenAI API.\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from llm import my_llm_tool\n",
+    "\n",
+    "deployment_name = \"gpt-35-turbo-16k\""
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Call `start_trace()`, and configure the OTLP exporter to point to the collector above."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from promptflow.tracing import start_trace\n",
+    "\n",
+    "start_trace()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from opentelemetry import trace\n",
+    "from opentelemetry.exporter.otlp.proto.http.trace_exporter import OTLPSpanExporter\n",
+    "from opentelemetry.sdk.trace.export import BatchSpanProcessor\n",
+    "\n",
+    "tracer_provider = trace.get_tracer_provider()\n",
+    "otlp_span_exporter = OTLPSpanExporter()\n",
+    "tracer_provider.add_span_processor(BatchSpanProcessor(otlp_span_exporter))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Visualize the traces in stdout."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "result = my_llm_tool(\n",
+    "    prompt=\"Write a simple Hello, world! program that displays the greeting message when executed.\",\n",
+    "    deployment_name=deployment_name,\n",
+    ")\n",
+    "result\n",
+    "# view the traces under this cell"
+   ]
+  }
+ ],
+ "metadata": {
+  "description": "A tutorial on how to leverage a custom OTLP collector.",
+  "kernelspec": {
+   "display_name": "tracing-rel",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.9.17"
+  },
+  "resources": ""
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/examples/tutorials/tracing/custom-otlp-collector/requirements.txt b/examples/tutorials/tracing/custom-otlp-collector/requirements.txt
new file mode 100644
index 00000000000..adc4a004819
--- /dev/null
+++ b/examples/tutorials/tracing/custom-otlp-collector/requirements.txt
@@ -0,0 +1,3 @@
+promptflow-tracing
+python-dotenv
+opentelemetry-exporter-otlp-proto-http
\ No newline at end of file
diff --git a/examples/tutorials/tracing/langchain/requirements.txt b/examples/tutorials/tracing/langchain/requirements.txt
new file mode 100644
index 00000000000..fab24c4abd6
--- /dev/null
+++ b/examples/tutorials/tracing/langchain/requirements.txt
@@ -0,0 +1,4 @@
+promptflow
+langchain>=0.1.5
+opentelemetry-instrumentation-langchain
+python-dotenv
\ No newline at end of file
diff --git a/examples/tutorials/tracing/langchain/trace-langchain.ipynb b/examples/tutorials/tracing/langchain/trace-langchain.ipynb
new file mode 100644
index 00000000000..f5bbc269895
--- /dev/null
+++ b/examples/tutorials/tracing/langchain/trace-langchain.ipynb
@@ -0,0 +1,167 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Tracing LangChain apps using Prompt flow & OpenTelemetry\n",
+    "\n",
+    "The tracing capability provided by Prompt flow is built on top of [OpenTelemetry](https://opentelemetry.io/), which gives you complete observability over your LLM applications. \n",
+    "There is already a rich set of OpenTelemetry [instrumentation packages](https://opentelemetry.io/ecosystem/registry/?language=python&component=instrumentation) available in the OpenTelemetry ecosystem. \n",
+    "\n",
+    "In this example we will demonstrate how to use the [opentelemetry-instrumentation-langchain](https://github.com/traceloop/openllmetry/tree/main/packages/opentelemetry-instrumentation-langchain) package provided by [Traceloop](https://www.traceloop.com/) to instrument [LangChain](https://python.langchain.com/docs/get_started/quickstart) apps.\n",
+    "\n",
+    "\n",
+    "**Learning Objectives** - Upon completing this tutorial, you should be able to:\n",
+    "\n",
+    "- Trace `LangChain` applications and visualize the trace of your application in prompt flow.\n",
+    "\n",
+    "## Requirements\n",
+    "\n",
+    "To run this notebook example, please install the required dependencies:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "%%capture --no-stderr\n",
+    "%pip install -r ./requirements.txt"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Start tracing LangChain using promptflow\n",
+    "\n",
+    "Start tracing using `promptflow.tracing.start_trace`; click the printed URL to view the trace UI."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from promptflow.tracing import start_trace\n",
+    "\n",
+    "# start a trace session, and print a url for user to check trace\n",
+    "start_trace()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "By default, `opentelemetry-instrumentation-langchain` instrumentation logs prompts, completions, and embeddings to span attributes. This gives you clear visibility into how your LLM application is working, and makes it easy to debug and evaluate the quality of the outputs."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# enable langchain instrumentation\n",
+    "from opentelemetry.instrumentation.langchain import LangchainInstrumentor\n",
+    "\n",
+    "instrumentor = LangchainInstrumentor()\n",
+    "if not instrumentor.is_instrumented_by_opentelemetry:\n",
+    "    instrumentor.instrument()"
+   ]
+  },
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Run a simple LangChain chain\n",
+    "\n",
+    "Below is an example targeting an AzureOpenAI resource. Please configure your `API_KEY` using a [.env](../.env) file; see [.env.example](../.env.example)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "\n",
+    "from langchain.chat_models import AzureChatOpenAI\n",
+    "from langchain.prompts.chat import ChatPromptTemplate\n",
+    "from langchain.chains import LLMChain\n",
+    "from dotenv import load_dotenv\n",
+    "\n",
+    "if \"AZURE_OPENAI_API_KEY\" not in os.environ:\n",
+    "    # load environment variables from .env file\n",
+    "    load_dotenv()\n",
+    "\n",
+    "llm = AzureChatOpenAI(\n",
+    "    deployment_name=os.environ[\"CHAT_DEPLOYMENT_NAME\"],\n",
+    "    openai_api_key=os.environ[\"AZURE_OPENAI_API_KEY\"],\n",
+    "    azure_endpoint=os.environ[\"AZURE_OPENAI_ENDPOINT\"],\n",
+    "    openai_api_type=\"azure\",\n",
+    "    openai_api_version=\"2023-07-01-preview\",\n",
+    "    temperature=0,\n",
+    ")\n",
+    "\n",
+    "prompt = ChatPromptTemplate.from_messages(\n",
+    "    [\n",
+    "        (\"system\", \"You are world class technical documentation writer.\"),\n",
+    "        (\"user\", \"{input}\"),\n",
+    "    ]\n",
+    ")\n",
+    "\n",
+    "chain = LLMChain(llm=llm, prompt=prompt, output_key=\"metrics\")\n",
+    "chain({\"input\": \"What is ChatGPT?\"})"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You should now be able to see traces of the chain in the promptflow UI. Check the trace UI URL printed by the cell with `start_trace`."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Next Steps\n",
+    "\n",
+    "By now you've successfully traced the LLM calls in your app using prompt flow.\n",
+    "\n",
+    "You can check out more examples:\n",
+    "- [Trace your flow](../../../flex-flows/basic/quickstart.ipynb): use the promptflow `@trace` decorator to structurally trace your app and evaluate it with a batch run."
+ ] + } + ], + "metadata": { + "description": "Tracing LLM calls in langchain application", + "kernelspec": { + "display_name": "prompt_flow", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.9.17" + }, + "resources": "" + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/examples/tutorials/tracing/math_to_code.py b/examples/tutorials/tracing/math_to_code.py new file mode 100644 index 00000000000..780432061fb --- /dev/null +++ b/examples/tutorials/tracing/math_to_code.py @@ -0,0 +1,93 @@ +import ast +import os + +from dotenv import load_dotenv +from openai import AzureOpenAI + +from promptflow.tracing import start_trace, trace + + +@trace +def infinite_loop_check(code_snippet): + tree = ast.parse(code_snippet) + for node in ast.walk(tree): + if isinstance(node, ast.While): + if not node.orelse: + return True + return False + + +@trace +def syntax_error_check(code_snippet): + try: + ast.parse(code_snippet) + except SyntaxError: + return True + return False + + +@trace +def error_fix(code_snippet): + tree = ast.parse(code_snippet) + for node in ast.walk(tree): + if isinstance(node, ast.While): + if not node.orelse: + node.orelse = [ast.Pass()] + return ast.unparse(tree) + + +@trace +def code_refine(original_code: str) -> str: + original_code = original_code.replace("python", "").replace("`", "").strip() + fixed_code = None + + if infinite_loop_check(original_code): + fixed_code = error_fix(original_code) + else: + fixed_code = original_code + + if syntax_error_check(fixed_code): + fixed_code = error_fix(fixed_code) + + return fixed_code + + +@trace +def code_gen(client: AzureOpenAI, question: str) -> str: + sys_prompt = ( + "I want you to act as a math expert specializing in Algebra, Geometry, and Calculus. " + "Given the question, develop python code to model the user's question. " + "Make sure only reply the executable code, no other words." + ) + completion = client.chat.completions.create( + model=os.getenv("CHAT_DEPLOYMENT_NAME", "gpt-35-turbo"), + messages=[ + { + "role": "system", + "content": sys_prompt, + }, + {"role": "user", "content": question}, + ], + ) + raw_code = completion.choices[0].message.content + result = code_refine(raw_code) + return result + + +if __name__ == "__main__": + start_trace() + + if "AZURE_OPENAI_API_KEY" not in os.environ: + # load environment variables from .env file + load_dotenv() + + client = AzureOpenAI( + api_key=os.getenv("AZURE_OPENAI_API_KEY"), + azure_endpoint=os.getenv("AZURE_OPENAI_ENDPOINT"), + api_version="2023-12-01-preview", + ) + + question = "What is 37593 * 67?" + + code = code_gen(client, question) + print(code) diff --git a/scripts/docs/conf.py b/scripts/docs/conf.py index 966d3a448c3..630157b3140 100644 --- a/scripts/docs/conf.py +++ b/scripts/docs/conf.py @@ -55,6 +55,8 @@ "deploy-using-docker.html", "deploy-using-kubernetes.html", "https://portal.azure.com/#create/Microsoft.CognitiveServicesTextAnalytics", # sphinx recognizes #create as an anchor while it's not. 
# noqa: E501 + "https://github.com/microsoft/promptflow/tree/main/examples/flex-flows", # remove when sample PR merged + "https://github.com/microsoft/promptflow/tree/main/examples/tutorials/tracing", # remove when sample PR merged ] linkcheck_exclude_documents = ["contributing"] diff --git a/scripts/readme/ghactions_driver/readme_templates/README.md.jinja2 b/scripts/readme/ghactions_driver/readme_templates/README.md.jinja2 index 8cdf3026a8c..fa9955bae8c 100644 --- a/scripts/readme/ghactions_driver/readme_templates/README.md.jinja2 +++ b/scripts/readme/ghactions_driver/readme_templates/README.md.jinja2 @@ -28,6 +28,20 @@ {% for tutorial in tutorials.readmes %}| [{{ tutorial.name }}]({{ tutorial.path }}) | [![{{tutorial.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{tutorial.yaml_name}}) | {{ tutorial.description }} | {% endfor %} +### Prompty ([prompty](prompty)) + +| path | status | description | +------|--------|------------- +{% for flow in prompty.readmes %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} | +{% endfor %} + +### Flex Flows ([flex-flows](flex-flows)) + +| path | status | description | +------|--------|------------- +{% for flow in flex_flows.readmes %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} | +{% endfor %} + ### Flows ([flows](flows)) #### [Standard flows](flows/standard/) @@ -76,6 +90,10 @@ {% endfor %} {%- if connections.notebooks|length > 0 -%}{% for connection in connections.notebooks %}| [{{ connection.name }}]({{ connection.path }}) | [![{{connection.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{connection.yaml_name}}) | {{ connection.description }} | {% endfor %}{% endif %} +{%- if flex_flows.notebooks|length > 0 -%}{% for flow in flex_flows.notebooks %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} | +{% endfor %}{% endif %} +{%- if prompty.notebooks|length > 0 -%}{% for flow in prompty.notebooks %}| [{{ flow.name }}]({{ flow.path }}) | [![{{flow.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{flow.yaml_name}}) | {{ flow.description }} | +{% endfor %}{% endif %} {%- if chats.notebooks|length > 0 -%}{% for chat in chats.notebooks %}| [{{ chat.name }}]({{ chat.path }}) | [![{{chat.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{chat.yaml_name}}) | {{ chat.description }} | {% endfor %}{% endif %} {%- if evaluations.notebooks|length > 0 -%}{% 
for evaluation in evaluations.notebooks %}| [{{ evaluation.name }}]({{ evaluation.path }}) | [![{{evaluation.pipeline_name}}](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}/badge.svg?branch={{branch}})](https://github.com/microsoft/promptflow/actions/workflows/{{evaluation.yaml_name}}) | {{ evaluation.description }} | diff --git a/scripts/readme/ghactions_driver/workflow_steps/step_create_env.yml.jinja2 b/scripts/readme/ghactions_driver/workflow_steps/step_create_env.yml.jinja2 index 86bf0f90334..863e53104ee 100644 --- a/scripts/readme/ghactions_driver/workflow_steps/step_create_env.yml.jinja2 +++ b/scripts/readme/ghactions_driver/workflow_steps/step_create_env.yml.jinja2 @@ -9,3 +9,8 @@ sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example mv .env.example .env fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi diff --git a/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run.yml.jinja2 b/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run.yml.jinja2 index b76dd517409..2d868e3c2ea 100644 --- a/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run.yml.jinja2 +++ b/scripts/readme/ghactions_driver/workflow_steps/step_extract_steps_and_run.yml.jinja2 @@ -12,6 +12,8 @@ run: | export aoai_api_key=${{ '{{' }}secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{ '{{' }}secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_CANARY }} @@ -22,6 +24,8 @@ run: | export aoai_api_key=${{ '{{' }}secrets.AOAI_API_KEY_TEST }} export aoai_api_endpoint=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} + export AZURE_OPENAI_API_KEY=${{ '{{' }}secrets.AOAI_API_KEY_TEST }} + export AZURE_OPENAI_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} export test_workspace_sub_id=${{ '{{' }} secrets.TEST_WORKSPACE_SUB_ID }} export test_workspace_rg=${{ '{{' }} secrets.TEST_WORKSPACE_RG }} export test_workspace_name=${{ '{{' }} secrets.TEST_WORKSPACE_NAME_PROD }} diff --git a/scripts/readme/ghactions_driver/workflow_templates/autogen_workflow.yml.jinja2 b/scripts/readme/ghactions_driver/workflow_templates/autogen_workflow.yml.jinja2 new file mode 100644 index 00000000000..bab8cab1333 --- /dev/null +++ b/scripts/readme/ghactions_driver/workflow_templates/autogen_workflow.yml.jinja2 @@ -0,0 +1,53 @@ +{% extends "workflow_skeleton.yml.jinja2" %} +{% block steps %} +runs-on: ubuntu-latest +steps: + - name: Checkout repository + uses: actions/checkout@v4 + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ '{{' }} secrets.AZURE_CREDENTIALS }} + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + run: | + python -m pip install --upgrade pip + pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt + pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: {{ gh_working_dir }} + run: | + AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ '{{' }} 
secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi + if [[ -e OAI_CONFIG_LIST.json.example ]]; then + echo "OAI_CONFIG_LIST replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" OAI_CONFIG_LIST.json.example + mv OAI_CONFIG_LIST.json.example OAI_CONFIG_LIST.json + fi + - name: Create Aoai Connection + run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" + - name: Test Notebook + working-directory: {{ gh_working_dir }} + run: | + papermill -k python {{ name }}.ipynb {{ name }}.output.ipynb + - name: Upload artifact + if: ${{ '{{' }} always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: {{ gh_working_dir }} +{% endblock steps %} \ No newline at end of file diff --git a/scripts/readme/ghactions_driver/workflow_templates/basic_workflow.yml.jinja2 b/scripts/readme/ghactions_driver/workflow_templates/basic_workflow.yml.jinja2 index 5dda384f6f2..80370b7140f 100644 --- a/scripts/readme/ghactions_driver/workflow_templates/basic_workflow.yml.jinja2 +++ b/scripts/readme/ghactions_driver/workflow_templates/basic_workflow.yml.jinja2 @@ -17,6 +17,22 @@ steps: python -m pip install --upgrade pip pip install -r ${{ '{{' }} github.workspace }}/examples/requirements.txt pip install -r ${{ '{{' }} github.workspace }}/examples/dev_requirements.txt + - name: setup .env file + working-directory: {{ gh_working_dir }} + run: | + AOAI_API_KEY=${{ '{{' }} secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + if [[ -e ../.env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" ../.env.example + mv ../.env.example ../.env + fi - name: Create Aoai Connection run: pf connection create -f ${{ '{{' }} github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ '{{' }} secrets.AOAI_API_KEY_TEST }}" api_base="${{ '{{' }} secrets.AOAI_API_ENDPOINT_TEST }}" - name: Test Notebook diff --git a/scripts/readme/readme.py b/scripts/readme/readme.py index e5f0c735de4..aacc2a87e95 100644 --- a/scripts/readme/readme.py +++ b/scripts/readme/readme.py @@ -74,6 +74,14 @@ def write_readme(workflow_telemetries, readme_telemetries): "readmes": [], "notebooks": [], } + flex_flows = { + "readmes": [], + "notebooks": [], + } + prompty = { + "readmes": [], + "notebooks": [], + } flows = { "readmes": [], "notebooks": [], @@ -166,6 +174,26 @@ def write_readme(workflow_telemetries, readme_telemetries): "description": description, } ) + elif gh_working_dir.startswith("examples/flex-flows"): + flex_flows["notebooks"].append( + { + "name": notebook_name, + "path": notebook_path, + "pipeline_name": pipeline_name, + "yaml_name": yaml_name, + "description": description, + } + ) + elif gh_working_dir.startswith("examples/prompty"): + 
prompty["notebooks"].append( + { + "name": notebook_name, + "path": notebook_path, + "pipeline_name": pipeline_name, + "yaml_name": yaml_name, + "description": description, + } + ) elif gh_working_dir.startswith("examples/tools/use-cases"): toolusecases["notebooks"].append( { @@ -257,6 +285,26 @@ def write_readme(workflow_telemetries, readme_telemetries): "description": description, } ) + elif readme_folder.startswith("examples/flex-flows"): + flex_flows["readmes"].append( + { + "name": notebook_name, + "path": notebook_path, + "pipeline_name": pipeline_name, + "yaml_name": yaml_name, + "description": description, + } + ) + elif readme_folder.startswith("examples/prompty"): + prompty["readmes"].append( + { + "name": notebook_name, + "path": notebook_path, + "pipeline_name": pipeline_name, + "yaml_name": yaml_name, + "description": description, + } + ) elif readme_folder.startswith("examples/tools/use-cases"): toolusecases["readmes"].append( { @@ -278,6 +326,8 @@ def write_readme(workflow_telemetries, readme_telemetries): replacement = { "branch": BRANCH, "tutorials": tutorials, + "flex_flows": flex_flows, + "prompty": prompty, "flows": flows, "evaluations": evaluations, "chats": chats, @@ -310,6 +360,8 @@ def main(check): input_glob_readme = [ "examples/flows/**/README.md", + "examples/flex-flows/**/README.md", + "examples/prompty/**/README.md", "examples/connections/**/README.md", "examples/tutorials/e2e-development/*.md", "examples/tutorials/flow-fine-tuning-evaluation/*.md", diff --git a/scripts/readme/workflow_generator.py b/scripts/readme/workflow_generator.py index 86e2217c86b..699faa85ce2 100644 --- a/scripts/readme/workflow_generator.py +++ b/scripts/readme/workflow_generator.py @@ -82,6 +82,8 @@ def write_notebook_workflow(notebook, name, output_telemetry=Telemetry()): template = env.get_template("pdf_workflow.yml.jinja2") elif "flowasfunction" in workflow_name: template = env.get_template("flow_as_function.yml.jinja2") + elif "traceautogengroupchat" in workflow_name: + template = env.get_template("autogen_workflow.yml.jinja2") content = template.render( {