diff --git a/.cspell.json b/.cspell.json index 662e7e4879a..12e55fde2d1 100644 --- a/.cspell.json +++ b/.cspell.json @@ -140,7 +140,8 @@ "pkill", "pgrep", "Hwfoxydrg", - "llms" + "llms", + "vcrpy" ], "allowCompoundWords": true } diff --git a/.github/actions/step_merge_main/action.yml b/.github/actions/step_merge_main/action.yml new file mode 100644 index 00000000000..c66a8e79c6f --- /dev/null +++ b/.github/actions/step_merge_main/action.yml @@ -0,0 +1,11 @@ +name: step_merge_main +runs: + using: composite + steps: + - name: Merge main to current branch + working-directory: ${{ github.workspace }} + shell: pwsh + run: | + git config --global user.name 'prompt flow fundamental' + git config --global user.email 'aml-pt-eng@microsoft.com' + git pull --no-ff origin main diff --git a/.github/actions/step_sdk_setup/action.yml b/.github/actions/step_sdk_setup/action.yml index e5cbb0dae81..06776e581d4 100644 --- a/.github/actions/step_sdk_setup/action.yml +++ b/.github/actions/step_sdk_setup/action.yml @@ -36,7 +36,7 @@ runs: pip list python ./setup.py bdist_wheel $package = Get-ChildItem ./dist | ? { $_.Name.Contains('.whl')} - pip install $($package.FullName + "[azure]") + pip install $($package.FullName + "[azure,executable]") echo "########### pip freeze (After) ###########" pip freeze working-directory: ${{ inputs.scriptPath }} diff --git a/.github/actions/step_sdk_setup_win/action.yml b/.github/actions/step_sdk_setup_win/action.yml index 89075bfd887..2ad1ab91001 100644 --- a/.github/actions/step_sdk_setup_win/action.yml +++ b/.github/actions/step_sdk_setup_win/action.yml @@ -34,7 +34,7 @@ runs: pip list python ./setup.py bdist_wheel $package = Get-ChildItem ./dist | ? { $_.Name.Contains('.whl')} - pip install $($package.FullName + "[azure]") + pip install $($package.FullName + "[azure,executable]") echo "########### pip freeze (After) ###########" pip freeze working-directory: ${{ inputs.scriptPath }} diff --git a/.github/workflows/promptflow-executor-e2e-test.yml b/.github/workflows/promptflow-executor-e2e-test.yml index 439c5648d74..8aec7e14728 100644 --- a/.github/workflows/promptflow-executor-e2e-test.yml +++ b/.github/workflows/promptflow-executor-e2e-test.yml @@ -28,13 +28,16 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha || github.ref }} + fetch-depth: 0 + - name: merge main to current branch + uses: "./.github/actions/step_merge_main" - name: Display and Set Environment Variables run: | if [ "ubuntu-latest" == "${{ matrix.os }}" ]; then diff --git a/.github/workflows/promptflow-executor-unit-test.yml b/.github/workflows/promptflow-executor-unit-test.yml index 6ac821fe0da..eb481694545 100644 --- a/.github/workflows/promptflow-executor-unit-test.yml +++ b/.github/workflows/promptflow-executor-unit-test.yml @@ -28,13 +28,16 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha || github.ref }} + fetch-depth: 0 + - name: merge main to current branch + uses: "./.github/actions/step_merge_main" - name: Display and Set Environment Variables run: | if [ "ubuntu-latest" == "${{ matrix.os }}" ]; then @@ -112,4 +115,4 @@ jobs: pythonVersion: 3.9 coverageThreshold: 50 token: ${{ 
secrets.GITHUB_TOKEN }} - context: test/executor_unit + context: test/executor_unit \ No newline at end of file diff --git a/.github/workflows/promptflow-global-config-test.yml b/.github/workflows/promptflow-global-config-test.yml index 6d87d53400a..34f078efa22 100644 --- a/.github/workflows/promptflow-global-config-test.yml +++ b/.github/workflows/promptflow-global-config-test.yml @@ -32,9 +32,12 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha || github.ref }} + fetch-depth: 0 + - name: merge main to current branch + uses: "./.github/actions/step_merge_main" - name: Display and Set Environment Variables run: | if [ "ubuntu-latest" == "${{ matrix.os }}" ]; then diff --git a/.github/workflows/promptflow-release-testing-matrix.yml b/.github/workflows/promptflow-release-testing-matrix.yml index d2c88f5ffb4..17b8ff805ae 100644 --- a/.github/workflows/promptflow-release-testing-matrix.yml +++ b/.github/workflows/promptflow-release-testing-matrix.yml @@ -81,6 +81,7 @@ jobs: gci env:* | sort-object name az account show pip install langchain + pip install numexpr python scripts/building/run_coverage_tests.py ` -p ${{ github.workspace }}/src/promptflow/promptflow ` -t ${{ github.workspace }}/src/promptflow/tests/executor/e2etests ${{ github.workspace }}/src/promptflow/tests/executor/unittests ` diff --git a/.github/workflows/promptflow-sdk-cli-azure-e2e-test.yml b/.github/workflows/promptflow-sdk-cli-azure-e2e-test.yml new file mode 100644 index 00000000000..2ea72fdaf79 --- /dev/null +++ b/.github/workflows/promptflow-sdk-cli-azure-e2e-test.yml @@ -0,0 +1,100 @@ +name: promptflow-sdk-cli-azure-e2e-test + +on: + pull_request: + paths: + - src/promptflow/** + - scripts/** + - '**promptflow-sdk-cli-azure-e2e-test.yml' + workflow_dispatch: + +env: + packageSetupType: promptflow_with_extra + testWorkingDirectory: ${{ github.workspace }}/src/promptflow + PYTHONPATH: ${{ github.workspace }}/src/promptflow + IS_IN_CI_PIPELINE: "true" + PROMPT_FLOW_TEST_RUN_LIVE: "false" + PROMPT_FLOW_SKIP_LIVE_RECORDING: "false" + +jobs: + sdk_cli_azure_e2e_test: + strategy: + fail-fast: false + matrix: + os: [ubuntu-latest, macos-latest] + runs-on: ${{ matrix.os }} + steps: + - name: checkout + uses: actions/checkout@v4 + + - name: Display and Set Environment Variables + run: | + if [ "ubuntu-latest" == "${{ matrix.os }}" ]; then + export pyVersion="3.9"; + elif [ "macos-latest" == "${{ matrix.os }}" ]; then + export pyVersion="3.10"; + else + echo "Unsupported OS: ${{ matrix.os }}"; + exit 1; + fi + env | sort >> $GITHUB_OUTPUT + id: display_env + shell: bash -el {0} + + - name: Python Setup - ${{ matrix.os }} - Python Version ${{ steps.display_env.outputs.pyVersion }} + uses: "./.github/actions/step_create_python_environment" + with: + pythonVersion: ${{ steps.display_env.outputs.pyVersion }} + + - name: Build wheel + uses: "./.github/actions/step_sdk_setup" + with: + setupType: ${{ env.packageSetupType }} + scriptPath: ${{ env.testWorkingDirectory }} + + - name: Get number of CPU cores + uses: SimenB/github-actions-cpu-cores@v1 + id: cpu-cores + + - name: Run Test + shell: pwsh + working-directory: ${{ env.testWorkingDirectory }} + run: | + gci env:* | sort-object name + python "../../scripts/building/run_coverage_tests.py" ` + -p promptflow ` + -t ${{ github.workspace }}/src/promptflow/tests/sdk_cli_azure_test/e2etests ` + -l eastus ` + -m "e2etest" ` + -n ${{ steps.cpu-cores.outputs.count }} ` + 
--coverage-config ${{ github.workspace }}/src/promptflow/tests/sdk_cli_test/.coveragerc + + - name: Upload Test Results + if: always() + uses: actions/upload-artifact@v3 + with: + name: Test Results (Python ${{ steps.display_env.outputs.pyVersion }}) (OS ${{ matrix.os }}) + path: | + ${{ env.testWorkingDirectory }}/*.xml + ${{ env.testWorkingDirectory }}/htmlcov/ + + publish-test-results: + name: "Publish Tests Results" + needs: sdk_cli_azure_e2e_test + runs-on: ubuntu-latest + permissions: write-all + if: always() + + steps: + - name: checkout + uses: actions/checkout@v3 + - name: Publish Test Results + uses: "./.github/actions/step_publish_test_results" + with: + testActionFileName: promptflow-sdk-cli-azure-e2e-test.yml + testResultTitle: promptflow SDK CLI Azure E2E Test Result + osVersion: ubuntu-latest + pythonVersion: 3.9 + coverageThreshold: 40 + token: ${{ secrets.GITHUB_TOKEN }} + context: test/sdk_cli diff --git a/.github/workflows/promptflow-sdk-cli-test.yml b/.github/workflows/promptflow-sdk-cli-test.yml index 2f94fc27e40..ba3c2dfef5e 100644 --- a/.github/workflows/promptflow-sdk-cli-test.yml +++ b/.github/workflows/promptflow-sdk-cli-test.yml @@ -28,13 +28,16 @@ jobs: strategy: fail-fast: false matrix: - os: [ubuntu-latest, macos-latest] + os: [ubuntu-latest] runs-on: ${{ matrix.os }} steps: - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: ref: ${{ github.event.pull_request.head.sha || github.ref }} + fetch-depth: 0 + - name: merge main to current branch + uses: "./.github/actions/step_merge_main" - name: Display and Set Environment Variables run: | if [ "ubuntu-latest" == "${{ matrix.os }}" ]; then diff --git a/.github/workflows/promptflow-sdk-pfs-e2e-test.yml b/.github/workflows/promptflow-sdk-pfs-e2e-test.yml index d0e3e780c1f..a5ad5c4cc5d 100644 --- a/.github/workflows/promptflow-sdk-pfs-e2e-test.yml +++ b/.github/workflows/promptflow-sdk-pfs-e2e-test.yml @@ -32,7 +32,12 @@ jobs: runs-on: ${{ matrix.os }} steps: - name: checkout - uses: actions/checkout@v3 + uses: actions/checkout@v4 + with: + ref: ${{ github.event.pull_request.head.sha || github.ref }} + fetch-depth: 0 + - name: merge main to current branch + uses: "./.github/actions/step_merge_main" - name: Display and Set Environment Variables run: | if [ "ubuntu-latest" == "${{ matrix.os }}" ]; then diff --git a/.github/workflows/samples_flows_standard_flow_with_enabled_by_value.yml b/.github/workflows/samples_flows_standard_flow_with_enabled_by_value.yml new file mode 100644 index 00000000000..42229eb1204 --- /dev/null +++ b/.github/workflows/samples_flows_standard_flow_with_enabled_by_value.yml @@ -0,0 +1,89 @@ +# This code is autogenerated. +# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. 
+ +name: samples_flows_standard_flow_with_enabled_by_value +on: + schedule: + - cron: "27 22 * * *" # Every day starting at 6:27 BJT + pull_request: + branches: [ main ] + paths: [ examples/flows/standard/flow-with-enabled-by-value/**, examples/*requirements.txt, .github/workflows/samples_flows_standard_flow_with_enabled_by_value.yml ] + workflow_dispatch: + +jobs: + samples_readme_ci: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Generate config.json + run: echo '${{ secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ github.workspace }}/examples/config.json + - name: Prepare requirements + working-directory: examples + run: | + if [[ -e requirements.txt ]]; then + python -m pip install --upgrade pip + pip install -r requirements.txt + fi + - name: Prepare dev requirements + working-directory: examples + run: | + python -m pip install --upgrade pip + pip install -r dev_requirements.txt + - name: Refine .env file + working-directory: examples/flows/standard/flow-with-enabled-by-value + run: | + AOAI_API_KEY=${{ secrets.AOAI_API_KEY_TEST }} + AOAI_API_ENDPOINT=${{ secrets.AOAI_API_ENDPOINT_TEST }} + AOAI_API_ENDPOINT=$(echo ${AOAI_API_ENDPOINT//\//\\/}) + if [[ -e .env.example ]]; then + echo "env replacement" + sed -i -e "s//$AOAI_API_KEY/g" -e "s//$AOAI_API_ENDPOINT/g" .env.example + mv .env.example .env + fi + - name: Create run.yml + working-directory: examples/flows/standard/flow-with-enabled-by-value + run: | + gpt_base=${{ secrets.AOAI_API_ENDPOINT_TEST }} + gpt_base=$(echo ${gpt_base//\//\\/}) + if [[ -e run.yml ]]; then + sed -i -e "s/\${azure_open_ai_connection.api_key}/${{ secrets.AOAI_API_KEY_TEST }}/g" -e "s/\${azure_open_ai_connection.api_base}/$gpt_base/g" run.yml + fi + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Extract Steps examples/flows/standard/flow-with-enabled-by-value/README.md + working-directory: ${{ github.workspace }} + run: | + python scripts/readme/extract_steps_from_readme.py -f examples/flows/standard/flow-with-enabled-by-value/README.md -o examples/flows/standard/flow-with-enabled-by-value + - name: Cat script + working-directory: examples/flows/standard/flow-with-enabled-by-value + run: | + cat bash_script.sh + - name: Run scripts + working-directory: examples/flows/standard/flow-with-enabled-by-value + run: | + export aoai_api_key=${{secrets.AOAI_API_KEY_TEST }} + export aoai_api_endpoint=${{ secrets.AOAI_API_ENDPOINT_TEST }} + export test_workspace_sub_id=${{ secrets.TEST_WORKSPACE_SUB_ID }} + export test_workspace_rg=${{ secrets.TEST_WORKSPACE_RG }} + export test_workspace_name=${{ secrets.TEST_WORKSPACE_NAME_CANARY }} + bash bash_script.sh + - name: Pip List for Debug + if : ${{ always() }} + working-directory: examples/flows/standard/flow-with-enabled-by-value + run: | + pip list + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/flows/standard/flow-with-enabled-by-value/bash_script.sh \ No newline at end of file diff --git a/.github/workflows/samples_runmanagement_runmanagement.yml b/.github/workflows/samples_runmanagement_runmanagement.yml new file mode 100644 index 00000000000..949e4cede83 --- /dev/null +++ b/.github/workflows/samples_runmanagement_runmanagement.yml @@ -0,0 +1,47 @@ +# This code is autogenerated. 
+# Code is generated by running custom script: python3 readme.py +# Any manual changes to this file may cause incorrect behavior. +# Any manual changes will be overwritten if the code is regenerated. + +name: samples_runmanagement_runmanagement +on: + schedule: + - cron: "51 20 * * *" # Every day starting at 4:51 BJT + pull_request: + branches: [ main ] + paths: [ examples/**, .github/workflows/samples_runmanagement_runmanagement.yml ] + workflow_dispatch: + +jobs: + samples_notebook_ci: + runs-on: ubuntu-latest + steps: + - name: Checkout repository + uses: actions/checkout@v3 + - name: Generate config.json + run: echo '${{ secrets.TEST_WORKSPACE_CONFIG_JSON_CANARY }}' > ${{ github.workspace }}/examples/config.json + - name: Azure Login + uses: azure/login@v1 + with: + creds: ${{ secrets.AZURE_CREDENTIALS }} + - name: Setup Python 3.9 environment + uses: actions/setup-python@v4 + with: + python-version: "3.9" + - name: Prepare requirements + run: | + python -m pip install --upgrade pip + pip install -r ${{ github.workspace }}/examples/requirements.txt + pip install -r ${{ github.workspace }}/examples/dev_requirements.txt + - name: Create Aoai Connection + run: pf connection create -f ${{ github.workspace }}/examples/connections/azure_openai.yml --set api_key="${{ secrets.AOAI_API_KEY_TEST }}" api_base="${{ secrets.AOAI_API_ENDPOINT_TEST }}" + - name: Test Notebook + working-directory: examples/tutorials/run-management + run: | + papermill -k python run-management.ipynb run-management.output.ipynb + - name: Upload artifact + if: ${{ always() }} + uses: actions/upload-artifact@v3 + with: + name: artifact + path: examples/tutorials/run-management diff --git a/docs/how-to-guides/develop-a-flow/develop-evaluation-flow.md b/docs/how-to-guides/develop-a-flow/develop-evaluation-flow.md new file mode 100644 index 00000000000..e9518510ede --- /dev/null +++ b/docs/how-to-guides/develop-a-flow/develop-evaluation-flow.md @@ -0,0 +1,167 @@ +# Develop evaluation flow + +:::{admonition} Experimental feature +This is an experimental feature, and may change at any time. Learn [more](../faq.md#stable-vs-experimental). +::: + +The evaluation flow is a flow to test/evaluate the quality of your LLM application (standard/chat flow). It usually runs on the outputs of the standard/chat flow, and computes key metrics that can be used to determine whether the standard/chat flow performs well. See [Flows](../../concepts/concept-flows.md) for more information. + +Before proceeding with this document, it is important to have a good understanding of the standard flow. Please make sure you have read [Develop standard flow](./develop-standard-flow.md), since the two share many common features that won't be repeated in this doc, such as: +- `Inputs/Outputs definition` +- `Nodes` +- `Chain nodes in a flow` + +While the evaluation flow shares similarities with the standard flow, there are some important differences that set it apart. The main distinctions are as follows: +- `Inputs from an existing run`: The evaluation flow contains inputs that are derived from the outputs of the standard/chat flow. These inputs are used for evaluation purposes. +- `Aggregation node`: The evaluation flow contains one or more aggregation nodes, where the actual evaluation takes place. These nodes are responsible for computing metrics and determining the performance of the standard/chat flow.
+ +## Evaluation flow example + +In this guide, we use the [eval-classification-accuracy](https://github.com/microsoft/promptflow/tree/main/examples/flows/evaluation/eval-classification-accuracy) flow as an example of the evaluation flow. This is a flow illustrating how to evaluate the performance of a classification flow. It compares each prediction to the groundtruth, assigns a `Correct` or `Incorrect` grade, and aggregates the results to produce metrics such as `accuracy`, which reflects how good the system is at classifying the data. + +## Flow inputs + +The flow `eval-classification-accuracy` contains two inputs: + +```yaml +inputs: + groundtruth: + type: string + description: Groundtruth of the original question, it's the correct label that you hope your standard flow could predict. + default: APP + prediction: + type: string + description: The actual predicted outputs that your flow produces. + default: APP +``` + +As evident from the inputs description, the evaluation flow requires two specific inputs: +- `groundtruth`: This input represents the actual or expected values against which the performance of the standard/chat flow will be evaluated. +- `prediction`: The prediction input is derived from the outputs of another standard/chat flow. It contains the predicted values generated by the standard/chat flow, which will be compared to the groundtruth values during the evaluation process. + +From the definition perspective, there is no difference compared with adding an input/output in a `standard/chat flow`. However, when running an evaluation flow, you may need to specify the data source from both the data file and the flow run outputs. For more details please refer to [Run and evaluate a flow](../run-and-evaluate-a-flow/run-and-evaluate-a-flow.md#evaluate-your-flow). + + +## Aggregation node + + +Before introducing the aggregation node, let's see what a regular node looks like. We use the node `grade` in the example flow as an instance: + +```yaml +- name: grade + type: python + source: + type: code + path: grade.py + inputs: + groundtruth: ${inputs.groundtruth} + prediction: ${inputs.prediction} +``` + +It takes both `groundtruth` and `prediction` from the flow inputs and compares them in the source code to see if they match: + +```python +from promptflow import tool + +@tool +def grade(groundtruth: str, prediction: str): + return "Correct" if groundtruth.lower() == prediction.lower() else "Incorrect" +``` + +When it comes to an `aggregation node`, there are two key distinctions that set it apart from a regular node: +1. It has an attribute `aggregation` set to `true`. + +```yaml +- name: calculate_accuracy + type: python + source: + type: code + path: calculate_accuracy.py + inputs: + grades: ${grade.output} + aggregation: true # Add this attribute to make it an aggregation node +``` + +2. Its source code accepts a `List` type parameter which is a collection of the previous regular node's outputs. + +```python +from typing import List +from promptflow import log_metric, tool + +@tool +def calculate_accuracy(grades: List[str]): + result = [] + for index in range(len(grades)): + grade = grades[index] + result.append(grade) + + # calculate accuracy for each variant + accuracy = round((result.count("Correct") / len(result)), 2) + log_metric("accuracy", accuracy) + + return result +``` + +The parameter `grades` in the above function contains all results that are produced by the regular node `grade`.
Assuming the referred standard flow run has 3 outputs: + +```json +{"prediction": "App"} +{"prediction": "Channel"} +{"prediction": "Academic"} +``` + + + And we provide a data file like this: + ```json +{"groundtruth": "App"} +{"groundtruth": "Channel"} +{"groundtruth": "Wiki"} +``` + +Then the `grades` value would be `["Correct", "Correct", "Incorrect"]`, and the final accuracy is `0.67`. + +This example provides a straightforward demonstration of how to evaluate the classification flow. Once you have a solid understanding of the evaluation mechanism, you can customize and design your own evaluation method to suit your specific needs. + +### More about the list parameter + +What if the number of referred standard flow run outputs does not match the provided data file? We know that a standard flow can be executed against multiple lines of data, and some of them could fail while others succeed. Consider the same standard flow run mentioned in the above example, but the `2nd` line run has failed, so we have the run outputs below: + + +```json +{"prediction": "App"} +{"prediction": "Academic"} +``` + +The promptflow flow executor has the capability to recognize the index of the referred run's outputs and extract the corresponding data from the provided data file. This means that during the execution process, even if the same data file is provided (3 lines), only the specific data mentioned below will be processed: + + ```json +{"groundtruth": "App"} +{"groundtruth": "Wiki"} +``` + +In this case, the `grades` value would be `["Correct", "Incorrect"]` and the accuracy is `0.5`. + + +### How to set aggregation node in VS Code Extension + + +![img](../../media/how-to-guides/develop-evaluation-flow/set_aggregation_node_in_vscode.png) + + +## How to log metrics + +Promptflow supports logging and tracking experiments using the `log_metric` function. A metric is a key-value pair that records a single float measure. In a python node, you can log a metric with the code below: + +```python +from promptflow import log_metric, tool + +@tool +def example_log_metrics(): + metric_key = "accuracy" + metric_value = 1.0 + log_metric(metric_key, metric_value) +``` + +After the run is completed, you can run `pf run show-metrics -n <run_name>` to see the metrics. + +![img](../../media/how-to-guides/run_show_metrics.png) diff --git a/docs/how-to-guides/develop-a-flow/index.md index 0bc532f214c..c5f12a97527 100644 --- a/docs/how-to-guides/develop-a-flow/index.md +++ b/docs/how-to-guides/develop-a-flow/index.md @@ -7,4 +7,5 @@ We provide guides on how to develop a flow by writing a flow yaml from scratch i develop-standard-flow develop-chat-flow +develop-evaluation-flow ``` \ No newline at end of file diff --git a/docs/how-to-guides/manage-runs.md index 5395e840404..f98f40d44b4 100644 --- a/docs/how-to-guides/manage-runs.md +++ b/docs/how-to-guides/manage-runs.md @@ -45,7 +45,7 @@ column_mapping: run: ``` -Reference [here](./run-and-evaluate-a-flow/use-column-mapping.md) for detailed information for column mapping. +Reference [here](https://aka.ms/pf/column-mapping) for detailed information on column mapping. You can find additional information about flow yaml schema in [Run YAML Schema](../reference/run-yaml-schema-reference.md).
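If you prefer the SDK, a run defined in such a YAML file can also be loaded and submitted from Python. Below is a minimal sketch, assuming a `run.yml` shaped like the snippet above; `load_run` is the helper that this PR's CHANGELOG exposes, and the notebook later in this diff uses the same calls:

```python
from promptflow import PFClient
from promptflow._sdk._load_functions import load_run  # re-exported as `from promptflow import load_run` in this release

pf = PFClient()

# load flow, data and column_mapping for the run from the YAML file
run = load_run(source="run.yml")

# submit the run and stream its logs
run = pf.runs.create_or_update(run=run)
pf.runs.stream(run)
```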
After preparing the yaml file, use the CLI command below to create them: diff --git a/docs/how-to-guides/run-and-evaluate-a-flow/run-and-evaluate-a-flow.md index 683fc289dcc..c2510cd4b18 100644 --- a/docs/how-to-guides/run-and-evaluate-a-flow/run-and-evaluate-a-flow.md +++ b/docs/how-to-guides/run-and-evaluate-a-flow/run-and-evaluate-a-flow.md @@ -28,7 +28,7 @@ Create the run with flow and data; you can add `--stream` to stream the run. pf run create --flow standard/web-classification --data standard/web-classification/data.jsonl --column-mapping url='${data.url}' --stream ``` -Note `column-mapping` is a mapping from flow input name to specified values, see more details in [Use column mapping](./use-column-mapping.md). +Note `column-mapping` is a mapping from flow input name to specified values, see more details in [Use column mapping](https://aka.ms/pf/column-mapping). You can also name the run by specifying `--name my_first_run` in the above command; otherwise the run name will be generated in a certain pattern which has a timestamp inside. @@ -103,7 +103,7 @@ Click the bulk test button on the top of the visual editor to trigger flow test. ## Evaluate your flow -You can use an evaluation method to evaluate your flow. The evaluation methods are also flows which use Python or LLM etc., to calculate metrics like accuracy, relevance score. +You can use an evaluation method to evaluate your flow. The evaluation methods are also flows, which use Python, LLM, etc. to calculate metrics like accuracy and relevance score. Please refer to [Develop evaluation flow](../develop-a-flow/develop-evaluation-flow.md) to learn how to develop an evaluation flow. In this guide, we use the [eval-classification-accuracy](https://github.com/microsoft/promptflow/tree/main/examples/flows/evaluation/eval-classification-accuracy) flow to evaluate. This is a flow illustrating how to evaluate the performance of a classification system. It compares each prediction to the groundtruth, assigns a `Correct` or `Incorrect` grade, and aggregates the results to produce metrics such as `accuracy`, which reflects how good the system is at classifying the data. @@ -118,10 +118,10 @@ After the run is finished, you can evaluate the run with below command, compared with the normal run create command, note there are two extra arguments: -- `column-mapping`: A mapping from flow input name to specified data values. Reference [here](./use-column-mapping.md) for detailed information. +- `column-mapping`: A mapping from flow input name to specified data values. Reference [here](https://aka.ms/pf/column-mapping) for detailed information. - `run`: The run name of the flow run to be evaluated. -More details can be found in [Use column mapping](./use-column-mapping.md). +More details can be found in [Use column mapping](https://aka.ms/pf/column-mapping). ```sh pf run create --flow evaluation/eval-classification-accuracy --data standard/web-classification/data.jsonl --column-mapping groundtruth='${data.answer}' prediction='${run.outputs.category}' --run my_first_run --stream ``` @@ -161,7 +161,7 @@ After the run is finished, you can evaluate the run with below command, compared - If the data column is from your flow output, then it is specified as `${run.outputs.}`. - `run`: The run name or run instance of the flow run to be evaluated.
-More details can be found in [Use column mapping](./use-column-mapping.md). +More details can be found in [Use column mapping](https://aka.ms/pf/column-mapping). ```python # set eval flow path @@ -212,24 +212,6 @@ There are actions to trigger local batch runs. To perform an evaluation you can :::: -## How to log metrics - -Promptflow supports logging and tracking experiments using `log_metric` function. A metric is a key-value pair that records a single float measure. In a python node, you can log a metric with below code: - -```python -from promptflow import log_metric, tool - -@tool -def example_log_metrics(): - metric_key = "accuracy" - metric_value = 1.0 - log_metric(metric_key, metric_value) -``` - -After the run is completed, you can run `pf run show-metrics -n ` to see the metrics. - -![img](../../media/how-to-guides/run_show_metrics.png) - ## Next steps Learn more about: diff --git a/docs/how-to-guides/tune-prompts-with-variants.md b/docs/how-to-guides/tune-prompts-with-variants.md index d343b463e1a..70e6c9f99cc 100644 --- a/docs/how-to-guides/tune-prompts-with-variants.md +++ b/docs/how-to-guides/tune-prompts-with-variants.md @@ -71,7 +71,7 @@ Assuming you are in working directory `/examples/flows/ Note we pass `--variant` to specify which variant of the node should be running. ```sh -pf run create --flow web-classification --data web-classification/data.jsonl --variant '${summarize_text_content.variant_1}' --stream --name my_first_variant_run +pf run create --flow web-classification --data web-classification/data.jsonl --variant '${summarize_text_content.variant_1}' --column-mapping url='${data.url}' --stream --name my_first_variant_run ``` ::: @@ -91,6 +91,7 @@ variant_run = pf.run( flow=flow, data=data, variant="${summarize_text_content.variant_1}", # use variant 1. + column_mapping={"url": "${data.url}"}, ) pf.stream(variant_run) diff --git a/docs/media/how-to-guides/develop-evaluation-flow/set_aggregation_node_in_vscode.png b/docs/media/how-to-guides/develop-evaluation-flow/set_aggregation_node_in_vscode.png new file mode 100644 index 00000000000..4b99073379a Binary files /dev/null and b/docs/media/how-to-guides/develop-evaluation-flow/set_aggregation_node_in_vscode.png differ diff --git a/docs/reference/pf-command-reference.md b/docs/reference/pf-command-reference.md index 12dafbecbbc..2272280c683 100644 --- a/docs/reference/pf-command-reference.md +++ b/docs/reference/pf-command-reference.md @@ -697,6 +697,7 @@ Initialize a tool directory. ```bash pf tool init [--package] [--tool] + [--set] ``` #### Examples @@ -707,6 +708,18 @@ Creating a package tool from scratch. pf tool init --package --tool ``` +Creating a package tool with extra info. + +```bash +pf tool init --package --tool --set icon= category= tags="{'': ''}" +``` + +Creating a package tool from scratch. + +```bash +pf tool init --package --tool +``` + Creating a python tool from scratch. ```bash @@ -723,6 +736,10 @@ The package name to create. The tool name to create. +`--set` + +Set extra information about the tool, like category, icon and tags. Example: --set =. + ### pf tool list List all tools in the environment. diff --git a/docs/reference/pfazure-command-reference.md b/docs/reference/pfazure-command-reference.md index 2d2f19c029f..7634e28fa36 100644 --- a/docs/reference/pfazure-command-reference.md +++ b/docs/reference/pfazure-command-reference.md @@ -64,7 +64,7 @@ Inputs column mapping, use `${data.xx}` to refer to data file columns, use `${ru `--run` -Referenced flow run name. 
For example, you can run an evaluation flow against an existing run. For example, "pfazure run create --flow evaluation_flow_dir --run existing_bulk_run". +Referenced flow run name. For example, you can run an evaluation flow against an existing run. For example, "pfazure run create --flow evaluation_flow_dir --run existing_bulk_run --column-mapping url='${data.url}'". `--variant` diff --git a/examples/README.md b/examples/README.md index 41617e99a4e..d5d08ca1f33 100644 --- a/examples/README.md +++ b/examples/README.md @@ -47,6 +47,7 @@ | [conditional-flow-for-switch](flows/standard/conditional-flow-for-switch/README.md) | [![samples_flows_standard_conditional_flow_for_switch](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_conditional_flow_for_switch.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_conditional_flow_for_switch.yml) | This example is a conditional flow for switch scenario | | [customer-intent-extraction](flows/standard/customer-intent-extraction/README.md) | [![samples_flows_standard_customer_intent_extraction](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_customer_intent_extraction.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_customer_intent_extraction.yml) | This sample is using OpenAI chat model(ChatGPT/GPT4) to identify customer intent from customer's question | | [flow-with-additional-includes](flows/standard/flow-with-additional-includes/README.md) | [![samples_flows_standard_flow_with_additional_includes](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_additional_includes.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_additional_includes.yml) | User sometimes need to reference some common files or folders, this sample demos how to solve the problem using additional_includes | +| [flow-with-enabled-by-value](flows/standard/flow-with-enabled-by-value/README.md) | [![samples_flows_standard_flow_with_enabled_by_value](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_enabled_by_value.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_enabled_by_value.yml) | A standard flow using "enabled_by_value" python tool | | [flow-with-symlinks](flows/standard/flow-with-symlinks/README.md) | [![samples_flows_standard_flow_with_symlinks](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_symlinks.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_flow_with_symlinks.yml) | User sometimes need to reference some common files or folders, this sample demos how to solve the problem using symlinks | | [gen-docstring](flows/standard/gen-docstring/README.md) | [![samples_flows_standard_gen_docstring](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_gen_docstring.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_gen_docstring.yml) | This example can help you automatically generate Python code's docstring and return the modified code | | [maths-to-code](flows/standard/maths-to-code/README.md) | 
[![samples_flows_standard_maths_to_code](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_maths_to_code.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_standard_maths_to_code.yml) | Math to Code is a project that utilizes the power of the chatGPT model to generate code that models math questions and then executes the generated code to obtain the final numerical answer | @@ -90,6 +91,7 @@ | [quickstart-azure.ipynb](tutorials/get-started/quickstart-azure.ipynb) | [![samples_getstarted_quickstartazure](https://github.com/microsoft/promptflow/actions/workflows/samples_getstarted_quickstartazure.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_getstarted_quickstartazure.yml) | A quickstart tutorial to run a flow in Azure AI and evaluate it. | | [pipeline.ipynb](tutorials/flow-in-pipeline/pipeline.ipynb) | [![samples_flowinpipeline_pipeline](https://github.com/microsoft/promptflow/actions/workflows/samples_flowinpipeline_pipeline.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flowinpipeline_pipeline.yml) | {'description': 'Create pipeline using components to run a distributed job with tensorflow'} | | [cloud-run-management.ipynb](tutorials/run-management/cloud-run-management.ipynb) | [![samples_runmanagement_cloudrunmanagement](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_cloudrunmanagement.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_cloudrunmanagement.yml) | Flow run management in Azure AI | +| [run-management.ipynb](tutorials/run-management/run-management.ipynb) | [![samples_runmanagement_runmanagement](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_runmanagement.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_runmanagement_runmanagement.yml) | Flow run management | | [connection.ipynb](connections/connection.ipynb) | [![samples_connections_connection](https://github.com/microsoft/promptflow/actions/workflows/samples_connections_connection.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_connections_connection.yml) | Manage various types of connections using sdk | | [chat-with-pdf-azure.ipynb](flows/chat/chat-with-pdf/chat-with-pdf-azure.ipynb) | [![samples_flows_chat_chatwithpdf_chatwithpdfazure](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdfazure.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdfazure.yml) | A tutorial of chat-with-pdf flow that executes in Azure AI | | [chat-with-pdf.ipynb](flows/chat/chat-with-pdf/chat-with-pdf.ipynb) | [![samples_flows_chat_chatwithpdf_chatwithpdf](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdf.yml/badge.svg?branch=main)](https://github.com/microsoft/promptflow/actions/workflows/samples_flows_chat_chatwithpdf_chatwithpdf.yml) | A tutorial of chat-with-pdf flow that allows user ask questions about the content of a PDF file and get answers | diff --git a/examples/flows/evaluation/eval-basic/README.md b/examples/flows/evaluation/eval-basic/README.md index 690ed74fe48..163712cddc0 100644 --- a/examples/flows/evaluation/eval-basic/README.md +++ 
b/examples/flows/evaluation/eval-basic/README.md @@ -36,5 +36,8 @@ pf flow test --flow . --node line_process --inputs groundtruth=ABC prediction=AB There are two ways to evaluate a classification flow. ```bash -pf run create --flow . --data ./data.jsonl --stream -``` \ No newline at end of file +pf run create --flow . --data ./data.jsonl --column-mapping groundtruth='${data.groundtruth}' prediction='${data.prediction}' --stream +``` + +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. diff --git a/examples/flows/evaluation/eval-classification-accuracy/README.md index 688555551c3..517df33ea7a 100644 --- a/examples/flows/evaluation/eval-classification-accuracy/README.md +++ b/examples/flows/evaluation/eval-classification-accuracy/README.md @@ -38,9 +38,12 @@ pf flow test --flow . --node grade --inputs groundtruth=groundtruth prediction=p There are two ways to evaluate a classification flow. ```bash -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping groundtruth='${data.groundtruth}' prediction='${data.prediction}' --stream ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. + ### 3. create run against other flow run Learn more in [web-classification](../../standard/web-classification/README.md) diff --git a/examples/flows/evaluation/eval-entity-match-rate/README.md index 041ea7364c3..c04180efdd6 100644 --- a/examples/flows/evaluation/eval-entity-match-rate/README.md +++ b/examples/flows/evaluation/eval-entity-match-rate/README.md @@ -22,6 +22,8 @@ pf flow test --flow . ### 2. create flow run with multi line data ```bash -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping ground_truth='${data.ground_truth}' entities='${data.entities}' --stream ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. diff --git a/examples/flows/evaluation/eval-groundedness/README.md index 90ee849da25..2154bd0090a 100644 --- a/examples/flows/evaluation/eval-groundedness/README.md +++ b/examples/flows/evaluation/eval-groundedness/README.md @@ -25,6 +25,8 @@ pf flow test --flow . ### 2. create flow run with multi line data ```bash -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' answer='${data.answer}' context='${data.context}' --stream ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI.
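For SDK users, here is a sketch of the Python equivalent of the `pf run create` commands above; the flow path and mapping mirror the eval-classification-accuracy example, so treat the names as illustrative:

```python
from promptflow import PFClient

pf = PFClient()

# equivalent of:
#   pf run create --flow . --data ./data.jsonl \
#     --column-mapping groundtruth='${data.groundtruth}' prediction='${data.prediction}' --stream
run = pf.run(
    flow=".",
    data="./data.jsonl",
    column_mapping={
        "groundtruth": "${data.groundtruth}",
        "prediction": "${data.prediction}",
    },
)
pf.stream(run)  # like --stream in the CLI
```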
diff --git a/examples/flows/evaluation/eval-perceived-intelligence/README.md index c299a43b763..1a9d0b5c4f5 100644 --- a/examples/flows/evaluation/eval-perceived-intelligence/README.md +++ b/examples/flows/evaluation/eval-perceived-intelligence/README.md @@ -26,6 +26,8 @@ pf flow test --flow . ### 2. create flow run with multi line data ```bash -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping question='${data.question}' answer='${data.answer}' context='${data.context}' --stream ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. diff --git a/examples/flows/standard/autonomous-agent/README.md index 40af28220e6..c0640ad5b67 100644 --- a/examples/flows/standard/autonomous-agent/README.md +++ b/examples/flows/standard/autonomous-agent/README.md @@ -56,8 +56,11 @@ pf flow test --flow . ```bash # create run using command line args -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping name='${data.name}' role='${data.role}' goals='${data.goals}' --stream ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. + ## Disclaimer LLM systems are susceptible to prompt injection, and you can gain a deeper understanding of this issue in the [technical blog](https://developer.nvidia.com/blog/securing-llm-systems-against-prompt-injection/). As an illustration, the PythonREPL function might execute harmful code if provided with a malicious prompt within the provided sample. Furthermore, we cannot guarantee that implementing AST validations solely within the PythonREPL function will reliably elevate the sample's security to an enterprise level. We kindly remind you to refrain from utilizing this in a production environment. \ No newline at end of file diff --git a/examples/flows/standard/basic-with-builtin-llm/README.md index f7a057b894a..8232f182323 100644 --- a/examples/flows/standard/basic-with-builtin-llm/README.md +++ b/examples/flows/standard/basic-with-builtin-llm/README.md @@ -48,9 +48,12 @@ pf flow test --flow . --inputs text="Python Hello World!" - create run ```bash -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping text='${data.text}' --stream ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. + - list and show run meta ```bash # list created run diff --git a/examples/flows/standard/basic-with-connection/README.md index 351c342ed5c..4fe48c879d5 100644 --- a/examples/flows/standard/basic-with-connection/README.md +++ b/examples/flows/standard/basic-with-connection/README.md @@ -49,9 +49,12 @@ pf flow test --flow . --node llm --inputs prompt="Write a simple Hello World! pr - create run ```bash -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . 
--data ./data.jsonl --column-mapping text='${data.text}' --stream ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. + - list and show run meta ```bash # list created run @@ -87,7 +90,7 @@ pf connection create --file ../../../connections/azure_openai.yml --set api_key= Run flow with newly created connection. ```bash -pf run create --flow . --data ./data.jsonl --connections llm.connection=open_ai_connection --stream +pf run create --flow . --data ./data.jsonl --connections llm.connection=open_ai_connection --column-mapping text='${data.text}' --stream ``` ### Run in cloud with connection override @@ -101,5 +104,5 @@ Run flow with connection `open_ai_connection`. az account set -s az configure --defaults group= workspace= -pfazure run create --flow . --data ./data.jsonl --connections llm.connection=open_ai_connection --stream --runtime example-runtime-ci +pfazure run create --flow . --data ./data.jsonl --connections llm.connection=open_ai_connection --column-mapping text='${data.text}' --stream --runtime example-runtime-ci ``` diff --git a/examples/flows/standard/basic/README.md index 43ae4c41d74..a4458623be2 100644 --- a/examples/flows/standard/basic/README.md +++ b/examples/flows/standard/basic/README.md @@ -42,9 +42,12 @@ pf flow test --flow . --node llm --inputs prompt="Write a simple Hello World pro - Create run with multiple lines data ```bash # using environment from .env file (loaded in user code: hello.py) -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping text='${data.text}' --stream ``` + +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. + - List and show run meta ```bash # list created run @@ -87,7 +90,7 @@ pf flow test --flow . --environment-variables AZURE_OPENAI_API_KEY='${open_ai_co - Create run using connection secret binding specified in environment variables, see [run.yml](run.yml) ```bash # create run -pf run create --flow . --data ./data.jsonl --stream --environment-variables AZURE_OPENAI_API_KEY='${open_ai_connection.api_key}' AZURE_OPENAI_API_BASE='${open_ai_connection.api_base}' +pf run create --flow . --data ./data.jsonl --stream --environment-variables AZURE_OPENAI_API_KEY='${open_ai_connection.api_key}' AZURE_OPENAI_API_BASE='${open_ai_connection.api_base}' --column-mapping text='${data.text}' # create run using yaml file pf run create --file run.yml --stream @@ -107,7 +110,7 @@ az configure --defaults group= workspace= workspace= -g -w -# pfazure run create --flow . --data ./data.jsonl --stream # automatic runtime +pfazure run create --flow . --data ./data.jsonl --column-mapping url='${data.url}' --stream --runtime example-runtime-ci --subscription -g -w +# pfazure run create --flow . 
--data ./data.jsonl --column-mapping url='${data.url}' --stream # automatic runtime # set default workspace az account set -s diff --git a/examples/flows/standard/flow-with-symlinks/run.yml b/examples/flows/standard/flow-with-symlinks/run.yml new file mode 100644 index 00000000000..516376919fb --- /dev/null +++ b/examples/flows/standard/flow-with-symlinks/run.yml @@ -0,0 +1,6 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json +flow: . +data: data.jsonl +variant: ${summarize_text_content.variant_1} +column_mapping: + url: ${data.url} \ No newline at end of file diff --git a/examples/flows/standard/gen-docstring/README.md index d53a39821b7..b6508fad575 100644 --- a/examples/flows/standard/gen-docstring/README.md +++ b/examples/flows/standard/gen-docstring/README.md @@ -54,8 +54,9 @@ pf flow test --flow . --inputs source="./azure_open_ai.py" ```bash # run flow with batch data -pf run create --flow . --data ./data.jsonl --name auto_generate_docstring +pf run create --flow . --data ./data.jsonl --name auto_generate_docstring --column-mapping source='${data.source}' ``` Output the code after adding the docstring. - +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. diff --git a/examples/flows/standard/named-entity-recognition/README.md index aef7de8107e..6ae2931b7c1 100644 --- a/examples/flows/standard/named-entity-recognition/README.md +++ b/examples/flows/standard/named-entity-recognition/README.md @@ -52,7 +52,8 @@ pf flow test --flow . --inputs text='The phone number (321) 654-0987 is no longe - create run ```bash -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping entity_type='${data.entity_type}' text='${data.text}' --stream ``` - +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. diff --git a/examples/flows/standard/web-classification/README.md index 448610725a6..a2493686bbd 100644 --- a/examples/flows/standard/web-classification/README.md +++ b/examples/flows/standard/web-classification/README.md @@ -52,7 +52,7 @@ pf flow test --flow . --inputs url='https://www.youtube.com/watch?v=kYqRtjDBci8' ```bash # create run using command line args -pf run create --flow . --data ./data.jsonl --stream +pf run create --flow . --data ./data.jsonl --column-mapping url='${data.url}' --stream # (Optional) create a random run name run_name="web_classification_"$(openssl rand -hex 12) @@ -60,6 +60,9 @@ pf run create --file run.yml --stream --name $run_name ``` +You can also skip providing `column-mapping` if the provided data has the same column names as the flow. +Reference [here](https://aka.ms/pf/column-mapping) for the default behavior when `column-mapping` is not provided in CLI. + ```bash # list run pf run list @@ -96,8 +99,8 @@ az account set -s az configure --defaults group= workspace= # create run -pfazure run create --flow . --data ./data.jsonl --stream --runtime example-runtime-ci -# pfazure run create --flow . --data ./data.jsonl --stream # automatic runtime +pfazure run create --flow . 
--data ./data.jsonl --column-mapping url='${data.url}' --stream --runtime example-runtime-ci +# pfazure run create --flow . --data ./data.jsonl --column-mapping url='${data.url}' --stream # automatic runtime # (Optional) create a new random run name for further use run_name="web_classification_"$(openssl rand -hex 12) diff --git a/examples/flows/standard/web-classification/run.yml b/examples/flows/standard/web-classification/run.yml index 9522372f0e0..3d38435d3e9 100644 --- a/examples/flows/standard/web-classification/run.yml +++ b/examples/flows/standard/web-classification/run.yml @@ -1,4 +1,6 @@ $schema: https://azuremlschemas.azureedge.net/promptflow/latest/Run.schema.json flow: . data: data.jsonl -variant: ${summarize_text_content.variant_1} \ No newline at end of file +variant: ${summarize_text_content.variant_1} +column_mapping: + url: ${data.url} diff --git a/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_strong_type_connection.py b/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_strong_type_connection.py index d7f502119a3..4450d49b198 100644 --- a/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_strong_type_connection.py +++ b/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_custom_strong_type_connection.py @@ -7,7 +7,7 @@ class MyCustomConnection(CustomStrongTypeConnection): """My custom strong type connection. :param api_key: The api key get from "https://xxx.com". - :type api_key: String + :type api_key: Secret :param api_base: The api base. :type api_base: String """ diff --git a/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_enabled_by_value.py b/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_enabled_by_value.py new file mode 100644 index 00000000000..70d39e7bb01 --- /dev/null +++ b/examples/tools/tool-package-quickstart/my_tool_package/tools/tool_with_enabled_by_value.py @@ -0,0 +1,27 @@ +from enum import Enum + +from promptflow import tool + + +class UserType(str, Enum): + STUDENT = "student" + TEACHER = "teacher" + + +@tool +def my_tool(user_type: Enum, student_id: str = "", teacher_id: str = "") -> str: + """This is a dummy function to support enabled by feature. + + :param user_type: user type, student or teacher. + :param student_id: student id. + :param teacher_id: teacher id. + :return: id of the user. + If user_type is student, return student_id. + If user_type is teacher, return teacher_id. 
+ """ + if user_type == UserType.STUDENT: + return student_id + elif user_type == UserType.TEACHER: + return teacher_id + else: + raise Exception("Invalid user.") diff --git a/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_enabled_by_value.yaml b/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_enabled_by_value.yaml new file mode 100644 index 00000000000..80eaa01b769 --- /dev/null +++ b/examples/tools/tool-package-quickstart/my_tool_package/yamls/tool_with_enabled_by_value.yaml @@ -0,0 +1,23 @@ +my_tool_package.tools.tool_with_enabled_by_value.my_tool: + function: my_tool + inputs: + user_type: + type: + - string + enum: + - student + - teacher + student_id: + type: + - string + enabled_by: user_type + enabled_by_value: [student] + teacher_id: + type: + - string + enabled_by: user_type + enabled_by_value: [teacher] + module: my_tool_package.tools.tool_with_enabled_by_value + name: My Tool with Enabled By Value + description: This is my tool with enabled by value + type: python \ No newline at end of file diff --git a/examples/tools/tool-package-quickstart/setup.py b/examples/tools/tool-package-quickstart/setup.py index 901026da85c..7d3c19068e7 100644 --- a/examples/tools/tool-package-quickstart/setup.py +++ b/examples/tools/tool-package-quickstart/setup.py @@ -4,7 +4,7 @@ setup( name=PACKAGE_NAME, - version="0.0.4", + version="0.0.5", description="This is my tools package", packages=find_packages(), entry_points={ diff --git a/examples/tools/tool-package-quickstart/tests/test_tool_with_enabled_by_value.py b/examples/tools/tool-package-quickstart/tests/test_tool_with_enabled_by_value.py new file mode 100644 index 00000000000..3a4e31ece0f --- /dev/null +++ b/examples/tools/tool-package-quickstart/tests/test_tool_with_enabled_by_value.py @@ -0,0 +1,6 @@ +from my_tool_package.tools.tool_with_enabled_by_value import my_tool + + +def test_my_tool(): + result = my_tool(user_type="student", student_id="123") + assert result == '123' diff --git a/examples/tutorials/e2e-development/chat-with-pdf.md b/examples/tutorials/e2e-development/chat-with-pdf.md index 201df938789..43c99d4dbe6 100644 --- a/examples/tutorials/e2e-development/chat-with-pdf.md +++ b/examples/tutorials/e2e-development/chat-with-pdf.md @@ -245,6 +245,7 @@ The output will include something like below: } ``` +Reference [here](https://aka.ms/pf/column-mapping) for default behavior when `column-mapping` not provided in CLI. And we developed two evaluation flows one for "[groundedness](../../flows/evaluation/eval-groundedness/)" and one for "[perceived intelligence](../../flows/evaluation/eval-perceived-intelligence/)". These two flows are using GPT models (ChatGPT or GPT4) to "grade" the answers. 
Reading the prompts will give you a better idea of what these two metrics are: - [groundedness prompt](../../flows/evaluation/eval-groundedness/gpt_groundedness.md) - [perceived intelligence prompt](../../flows/evaluation/eval-perceived-intelligence/gpt_perceived_intelligence.md) diff --git a/examples/tutorials/run-management/run-management.ipynb b/examples/tutorials/run-management/run-management.ipynb new file mode 100644 index 00000000000..e6de8fb20ef --- /dev/null +++ b/examples/tutorials/run-management/run-management.ipynb @@ -0,0 +1,238 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Flow Run Management\n", + "\n", + "**Prerequisite** - To make the most of this tutorial, you'll need:\n", + "- A local clone of the Prompt Flow repository\n", + "- A Python environment with Jupyter Notebook support (such as Jupyter Lab or the Python extension for Visual Studio Code)\n", + "- Know how to program with Python :)\n", + "\n", + "_A basic understanding of Machine Learning can be beneficial, but it's not mandatory._\n", + "\n", + "\n", + "**Learning Objectives** - By the end of this tutorial, you should be able to:\n", + "- manage runs via run.yaml\n", + "- create a run which references another run's inputs\n", + "- create a run with connection override\n", + "\n", + "\n", + "**Motivations** - This guide will walk you through local run management abilities." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 0. Install dependent packages" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%pip install -r ../../requirements.txt" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Create necessary connections\n", + "Connection helps securely store and manage secret keys or other sensitive credentials required for interacting with LLM and other external tools, for example Azure Content Safety.\n", + "\n", + "This notebook will use the connection `open_ai_connection`; we need to set up the connection if we haven't added it before. Once created, it's stored in the local db and can be used in any flow.\n", + "\n", + "Prepare your Azure Open AI resource following this [instruction](https://learn.microsoft.com/en-us/azure/cognitive-services/openai/how-to/create-resource?pivots=web-portal) and get your `api_key` if you don't have one."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import json\n", + "from promptflow import PFClient\n", + "from promptflow.connections import AzureOpenAIConnection, OpenAIConnection\n", + "\n", + "# client can help manage your runs and connections.\n", + "pf = PFClient()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "try:\n", + " conn_name = \"open_ai_connection\"\n", + " conn = pf.connections.get(name=conn_name)\n", + " print(\"using existing connection\")\n", + "except:\n", + " # Follow https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/create-resource?pivots=web-portal to create an Azure Open AI resource.\n", + " connection = AzureOpenAIConnection(\n", + " name=conn_name,\n", + " api_key=\"\",\n", + " api_base=\"\",\n", + " api_type=\"azure\",\n", + " api_version=\"\",\n", + " )\n", + "\n", + " # use this if you have an existing OpenAI account\n", + " # connection = OpenAIConnection(\n", + " # name=conn_name,\n", + " # api_key=\"\",\n", + " # )\n", + "\n", + " conn = pf.connections.create_or_update(connection)\n", + " print(\"successfully created connection\")\n", + "\n", + "print(conn)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 2. Create a run with a YAML file\n", + "\n", + "You can save the configurations for a run in a YAML file to avoid repeatedly providing them in the SDK/CLI.\n", + "In this step, we will create a sample run with a YAML file. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow._sdk._load_functions import load_run\n", + "\n", + "# load a run from YAML file\n", + "base_run = load_run(\n", + " source=\"../../flows/standard/web-classification/run.yml\",\n", + " # override the default params in the YAML file\n", + " params_override=[\n", + " {\"column_mapping\": {\"url\": \"${data.url}\"}}\n", + " ]\n", + ")\n", + "\n", + "# create the run\n", + "base_run = pf.runs.create_or_update(run=base_run)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "details = pf.get_details(base_run)\n", + "details.head(10)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Create a flow run which uses an existing run's input data\n", + "\n", + "When running a flow with an existing run, you can reference either its inputs or outputs in the column mapping.\n", + "The following code cell shows how to reference a run's inputs in the column mapping." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from promptflow.entities import Run\n", + "\n", + "# directly create the run object\n", + "run = Run(\n", + " # local flow file\n", + " flow=\"../../flows/standard/web-classification\",\n", + " # run name\n", + " run=base_run,\n", + " column_mapping={\n", + " # reference another run's inputs\n", + " \"url\": \"${run.inputs.url}\",\n", + " },\n", + ")\n", + "\n", + "base_run = pf.runs.create_or_update(\n", + " run=run,\n", + ")\n", + "\n", + "pf.runs.stream(base_run)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 4. 
Create a flow run with connection override\n", + "\n", + "Sometimes you want to switch the connection or deployment name inside a flow when submitting it.\n", + "Connection override provides an easy way to do this without changing the original `flow.dag.yaml`.\n", + "In the following code cell, we will submit the flow `web-classification` and override its connection to `open_ai_connection`. \n", + "Please make sure the connection `open_ai_connection` exists in your local environment." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "run = Run(\n", + " # local flow file\n", + " flow=\"../../flows/standard/web-classification\",\n", + " data=\"../../flows/standard/web-classification/data.jsonl\",\n", + " # override connection for node classify_with_llm & summarize_text_content\n", + " # you can replace connection to your local connections\n", + " connections={\n", + " \"classify_with_llm\": {\"connection\": \"open_ai_connection\"},\n", + " \"summarize_text_content\": {\"connection\": \"open_ai_connection\"},\n", + " },\n", + ")\n", + "\n", + "base_run = pf.runs.create_or_update(\n", + " run=run,\n", + ")\n", + "\n", + "pf.runs.stream(base_run)" + ] + } + ], + "metadata": { + "description": "Flow run management", + "kernelspec": { + "display_name": "github_v2", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.13" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/scripts/building/dev_requirements.txt b/scripts/building/dev_requirements.txt index 1fa4524789d..76d7e5f4dee 100644 --- a/scripts/building/dev_requirements.txt +++ b/scripts/building/dev_requirements.txt @@ -9,3 +9,7 @@ pytest-sugar pytest-timeout azure-keyvault azure-identity +# record and replay http requests for pfazure tests +vcrpy==5.1.0 +# parse token to get tenant id during sanitization +PyJWT==2.8.0 diff --git a/src/promptflow/CHANGELOG.md b/src/promptflow/CHANGELOG.md index ee89856fe18..b16c8f2c966 100644 --- a/src/promptflow/CHANGELOG.md +++ b/src/promptflow/CHANGELOG.md @@ -7,6 +7,8 @@ - [SDK/CLI] Support `pfazure run archive/restore/update`. - [SDK/CLI] Support custom strong type connection. - [SDK/CLI] Enable telemetry and won't collect by default, use `pf config set cli.telemetry_enabled=true` to opt in. +- [SDK/CLI] Exposed function `from promptflow import load_run` to load a run object from a local YAML file. +- [Executor] Support `ToolProvider` for script tools. ### Bugs Fixed - **pf config set**: @@ -16,6 +18,7 @@ - [Executor] Fix the bug can't read file containing "Private Use" unicode character. - [SDK/CLI] Fix string type data will be converted to integer/float. - [SDK/CLI] Remove the max rows limitation of loading data. +- [SDK/CLI] Fix the bug that `--set` was not taking effect when creating a run from a file. ### Improvements @@ -36,7 +39,7 @@ - **pf flow validate**: support validate flow - **pf config set**: support set user-level promptflow config. - Support workspace connection provider, usage: `pf config set connection.provider=azureml:/subscriptions//resourceGroups//providers/Microsoft.MachineLearningServices/workspaces/` -- Support override openai connection's model when submitting a flow.
For example: `pf run create --flow ./ --data ./data.jsonl --connection llm.model=xxx` +- Support override openai connection's model when submitting a flow. For example: `pf run create --flow ./ --data ./data.jsonl --connection llm.model=xxx --column-mapping url='${data.url}'` ### Bugs Fixed - [Flow build] Fix flow build file name and environment variable name when connection name contains space. diff --git a/src/promptflow/dev_requirements.txt b/src/promptflow/dev_requirements.txt index 6d7c0d57099..4316ab1d4d3 100644 --- a/src/promptflow/dev_requirements.txt +++ b/src/promptflow/dev_requirements.txt @@ -18,3 +18,7 @@ keyrings.alt promptflow-tools beautifulsoup4==4.12.2 +# record and replay http requests for pfazure tests +vcrpy==5.1.0 +# parse token to get tenant id during sanitization +PyJWT==2.8.0 diff --git a/src/promptflow/promptflow/__init__.py b/src/promptflow/promptflow/__init__.py index 16e2c555fed..83914a2bbf3 100644 --- a/src/promptflow/promptflow/__init__.py +++ b/src/promptflow/promptflow/__init__.py @@ -9,7 +9,7 @@ from promptflow._core.tool import ToolProvider, tool # control plane sdk functions -from promptflow._sdk._load_functions import load_flow +from promptflow._sdk._load_functions import load_flow, load_run from ._sdk._pf_client import PFClient from ._version import VERSION @@ -22,6 +22,7 @@ __all__ = [ "PFClient", "load_flow", + "load_run", "log_metric", "ToolProvider", "tool", diff --git a/src/promptflow/promptflow/_cli/_params.py b/src/promptflow/promptflow/_cli/_params.py index dc074b78595..6d4b6b45564 100644 --- a/src/promptflow/promptflow/_cli/_params.py +++ b/src/promptflow/promptflow/_cli/_params.py @@ -119,6 +119,16 @@ def add_param_columns_mapping(parser): ) +def add_param_set_tool_extra_info(parser): + parser.add_argument( + "--set", + dest="extra_info", + action=AppendToDictAction, + help="Set extra information about the tool. Example: --set <key>=<value>.", + nargs="+", + ) + + def add_param_inputs(parser): parser.add_argument( "--inputs", diff --git a/src/promptflow/promptflow/_cli/_pf/_flow.py b/src/promptflow/promptflow/_cli/_pf/_flow.py index 16be05896eb..2cd255358bb 100644 --- a/src/promptflow/promptflow/_cli/_pf/_flow.py +++ b/src/promptflow/promptflow/_cli/_pf/_flow.py @@ -40,6 +40,7 @@ from promptflow._sdk._constants import LOGGER_NAME, PROMPT_FLOW_DIR_NAME, ConnectionProvider from promptflow._sdk._pf_client import PFClient from promptflow._sdk._utils import dump_flow_result +from promptflow.exceptions import UserErrorException DEFAULT_CONNECTION = "open_ai_connection" DEFAULT_DEPLOYMENT = "gpt-35-turbo" @@ -383,8 +384,14 @@ def test_flow(args): if args.multi_modal: with tempfile.TemporaryDirectory() as temp_dir: - from streamlit.web import cli as st_cli - + try: + from streamlit.web import cli as st_cli + import streamlit_quill # noqa: F401 + except ImportError as ex: + raise UserErrorException( + f"Please install streamlit and streamlit_quill for multi_modal, {ex.msg}. " + f"You can try 'pip install promptflow[executable]' to install them."
+ ) flow = load_flow(args.flow) script_path = os.path.join(temp_dir, "main.py") StreamlitFileGenerator(flow_name=flow.name, flow_dag_path=flow.flow_dag_path).generate_to_file(script_path) diff --git a/src/promptflow/promptflow/_cli/_pf/_init_entry_generators.py b/src/promptflow/promptflow/_cli/_pf/_init_entry_generators.py index dd0acdf159b..f2101aabdcc 100644 --- a/src/promptflow/promptflow/_cli/_pf/_init_entry_generators.py +++ b/src/promptflow/promptflow/_cli/_pf/_init_entry_generators.py @@ -6,12 +6,15 @@ import logging import shutil from abc import ABC, abstractmethod +from ast import literal_eval from enum import Enum from pathlib import Path from jinja2 import Environment, Template, meta from promptflow._sdk._constants import LOGGER_NAME +from promptflow.contracts.flow import Flow as ExecutableFlow +from promptflow._sdk.operations._flow_operations import FlowOperations logger = logging.getLogger(LOGGER_NAME) TEMPLATE_PATH = Path(__file__).parent.parent / "data" / "entry_flow" @@ -223,15 +226,15 @@ class StreamlitFileGenerator(BaseGenerator): def __init__(self, flow_name, flow_dag_path): self.flow_name = flow_name self.flow_dag_path = Path(flow_dag_path) + self.executable = ExecutableFlow.from_yaml( + flow_file=Path(self.flow_dag_path.name), working_dir=self.flow_dag_path.parent + ) + self.is_chat_flow, self.chat_history_input_name, _ = FlowOperations._is_chat_flow(self.executable) @property def flow_inputs(self): - from promptflow.contracts.flow import Flow as ExecutableFlow - - executable = ExecutableFlow.from_yaml( - flow_file=Path(self.flow_dag_path.name), working_dir=self.flow_dag_path.parent - ) - return {flow_input: (value.default, value.type.value) for flow_input, value in executable.inputs.items()} + return {flow_input: (value.default, value.type.value) for flow_input, value in self.executable.inputs.items() + if not value.is_chat_history} @property def flow_inputs_params(self): @@ -248,7 +251,8 @@ def flow_path(self): @property def entry_template_keys(self): - return ["flow_name", "flow_inputs", "flow_inputs_params", "flow_path"] + return ["flow_name", "flow_inputs", "flow_inputs_params", "flow_path", "is_chat_flow", + "chat_history_input_name"] class ChatFlowDAGGenerator(BaseGenerator): @@ -303,8 +307,23 @@ def copy_extra_files(flow_path, extra_files): class ToolPackageGenerator(BaseGenerator): - def __init__(self, tool_name): + def __init__(self, tool_name, icon=None, extra_info=None): self.tool_name = tool_name + self._extra_info = extra_info + self.icon = icon + + @property + def extra_info(self): + if self._extra_info: + extra_info = {} + for k, v in self._extra_info.items(): + try: + extra_info[k] = literal_eval(v) + except Exception: + extra_info[k] = repr(v) + return extra_info + else: + return {} @property def tpl_file(self): @@ -312,7 +331,20 @@ def tpl_file(self): @property def entry_template_keys(self): - return ["tool_name"] + return ["tool_name", "extra_info", "icon"] + + +class ManifestGenerator(BaseGenerator): + def __init__(self, package_name): + self.package_name = package_name + + @property + def tpl_file(self): + return TOOL_TEMPLATE_PATH / "MANIFEST.in.jinja2" + + @property + def entry_template_keys(self): + return ["package_name"] class SetupGenerator(BaseGenerator): diff --git a/src/promptflow/promptflow/_cli/_pf/_run.py b/src/promptflow/promptflow/_cli/_pf/_run.py index d9dcde950d3..8c2bb27a0f2 100644 --- a/src/promptflow/promptflow/_cli/_pf/_run.py +++ b/src/promptflow/promptflow/_cli/_pf/_run.py @@ -572,7 +572,6 @@ def create_run(create_func: 
Callable, args): params_override = params_override or [] if file: - params_override = [] for param_key, param in { "name": name, "flow": flow, diff --git a/src/promptflow/promptflow/_cli/_pf/_tool.py b/src/promptflow/promptflow/_cli/_pf/_tool.py index 02d72995699..87b13cad0b1 100644 --- a/src/promptflow/promptflow/_cli/_pf/_tool.py +++ b/src/promptflow/promptflow/_cli/_pf/_tool.py @@ -3,20 +3,22 @@ # --------------------------------------------------------- import argparse +import json import logging import re -import json +import shutil from pathlib import Path -from promptflow._cli._params import logging_params +from promptflow._cli._params import add_param_set_tool_extra_info, logging_params from promptflow._cli._pf._init_entry_generators import ( InitGenerator, + ManifestGenerator, SetupGenerator, ToolPackageGenerator, ToolPackageUtilsGenerator, ToolReadmeGenerator, ) -from promptflow._cli._utils import activate_action, exception_handler +from promptflow._cli._utils import activate_action, exception_handler, list_of_dict_to_dict from promptflow._sdk._constants import LOGGER_NAME from promptflow._sdk._pf_client import PFClient from promptflow.exceptions import UserErrorException @@ -44,6 +46,8 @@ def add_parser_init_tool(subparsers): # Creating a package tool from scratch: pf tool init --package package_tool --tool tool_name +# Creating a package tool with extra info: +pf tool init --package package_tool --tool tool_name --set icon=<icon-path> category=<tool-category> # Creating a python tool from scratch: pf tool init --tool tool_name """ # noqa: E501 @@ -56,6 +60,7 @@ def add_parser_init_tool(subparsers): add_params = [ add_param_package, add_param_tool, + add_param_set_tool_extra_info, ] + logging_params return activate_action( name="init", @@ -78,9 +83,7 @@ def add_parser_list_tool(subparsers): # List all package tool and code tool in the flow: pf tool list --flow flow-path """ # noqa: E501 - add_param_flow = lambda parser: parser.add_argument( # noqa: E731 "--flow", type=str, help="the flow directory" - ) + add_param_flow = lambda parser: parser.add_argument("--flow", type=str, help="the flow directory") # noqa: E731 add_params = [ add_param_flow, ] + logging_params @@ -111,20 +114,34 @@ def init_tool(args): if not re.match(pattern, args.tool): raise UserErrorException(f"The tool name {args.tool} is an invalid identifier.") print("Creating tool from scratch...") + extra_info = list_of_dict_to_dict(args.extra_info) + icon_path = extra_info.pop("icon", None) + if icon_path: + if not Path(icon_path).exists(): + raise UserErrorException(f"Cannot find the icon path {icon_path}.") if args.package: package_path = Path(args.package) package_name = package_path.stem script_code_path = package_path / package_name script_code_path.mkdir(parents=True, exist_ok=True) + if icon_path: + package_icon_path = package_path / "icon" + package_icon_path.mkdir(exist_ok=True) + dst = shutil.copy2(icon_path, package_icon_path) + icon_path = f'Path(__file__).parent.parent / "icon" / "{Path(dst).name}"' # Generate package setup.py SetupGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "setup.py") + # Generate manifest file + ManifestGenerator(package_name=package_name).generate_to_file(package_path / "MANIFEST.in") # Generate utils.py to list metadata of tools.
ToolPackageUtilsGenerator(package_name=package_name).generate_to_file(script_code_path / "utils.py") ToolReadmeGenerator(package_name=package_name, tool_name=args.tool).generate_to_file(package_path / "README.md") else: script_code_path = Path(".") # Generate tool script - ToolPackageGenerator(tool_name=args.tool).generate_to_file(script_code_path / f"{args.tool}.py") + ToolPackageGenerator(tool_name=args.tool, icon=icon_path, extra_info=extra_info).generate_to_file( + script_code_path / f"{args.tool}.py" + ) InitGenerator().generate_to_file(script_code_path / "__init__.py") print(f'Done. Created the tool "{args.tool}" in {script_code_path.resolve()}.') diff --git a/src/promptflow/promptflow/_cli/data/chat_flow/template/flow.dag.yaml.jinja2 b/src/promptflow/promptflow/_cli/data/chat_flow/template/flow.dag.yaml.jinja2 index 4d9a4f58bf6..41eaec52ccb 100644 --- a/src/promptflow/promptflow/_cli/data/chat_flow/template/flow.dag.yaml.jinja2 +++ b/src/promptflow/promptflow/_cli/data/chat_flow/template/flow.dag.yaml.jinja2 @@ -1,3 +1,4 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: chat_history: type: list diff --git a/src/promptflow/promptflow/_cli/data/entry_flow/flow.dag.yaml.jinja2 b/src/promptflow/promptflow/_cli/data/entry_flow/flow.dag.yaml.jinja2 index 25b698b6f8b..2230947b90a 100644 --- a/src/promptflow/promptflow/_cli/data/entry_flow/flow.dag.yaml.jinja2 +++ b/src/promptflow/promptflow/_cli/data/entry_flow/flow.dag.yaml.jinja2 @@ -1,3 +1,4 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: {% for arg, typ in flow_inputs.items() %} {{ arg }}: diff --git a/src/promptflow/promptflow/_cli/data/evaluation_flow/flow.dag.yaml b/src/promptflow/promptflow/_cli/data/evaluation_flow/flow.dag.yaml index c82435688e0..68d01ed5da1 100644 --- a/src/promptflow/promptflow/_cli/data/evaluation_flow/flow.dag.yaml +++ b/src/promptflow/promptflow/_cli/data/evaluation_flow/flow.dag.yaml @@ -1,3 +1,4 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: groundtruth: type: string diff --git a/src/promptflow/promptflow/_cli/data/package_tool/MANIFEST.in.jinja2 b/src/promptflow/promptflow/_cli/data/package_tool/MANIFEST.in.jinja2 new file mode 100644 index 00000000000..2a1f577f2c7 --- /dev/null +++ b/src/promptflow/promptflow/_cli/data/package_tool/MANIFEST.in.jinja2 @@ -0,0 +1 @@ +include {{ package_name }}/icons \ No newline at end of file diff --git a/src/promptflow/promptflow/_cli/data/package_tool/tool.py.jinja2 b/src/promptflow/promptflow/_cli/data/package_tool/tool.py.jinja2 index f6e53d21145..1c26ddb03c7 100644 --- a/src/promptflow/promptflow/_cli/data/package_tool/tool.py.jinja2 +++ b/src/promptflow/promptflow/_cli/data/package_tool/tool.py.jinja2 @@ -1,8 +1,21 @@ +{% if icon %} +from pathlib import Path + +{% endif %} from promptflow import tool from promptflow.connections import CustomConnection -@tool(name="{{ tool_name }}", description="This is {{ tool_name }} tool") +@tool( + name="{{ tool_name }}", + description="This is {{ tool_name }} tool", +{% if icon %} + icon={{ icon }}, +{% endif %} +{% for key, value in extra_info.items() %} + {{ key }}={{ value }}, +{% endfor %} +) def {{ tool_name }}(connection: CustomConnection, input_text: str) -> str: # Replace with your tool code. # Usually connection contains configs to connect to an API. 
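To show what the template above produces, here is a sketch of the rendered tool script for a hypothetical `pf tool init --package package_tool --tool my_tool --set icon=./icon.png category=web` invocation; the icon file name, the `category` value, and the returned greeting are assumptions for illustration:

```python
from pathlib import Path

from promptflow import tool
from promptflow.connections import CustomConnection


@tool(
    name="my_tool",
    description="This is my_tool tool",
    # the icon was copied into <package>/icon/ by `pf tool init`, then
    # referenced relative to the generated module (see init_tool above)
    icon=Path(__file__).parent.parent / "icon" / "icon.png",
    # other --set values fall back to repr() and are passed through as kwargs
    category='web',
)
def my_tool(connection: CustomConnection, input_text: str) -> str:
    # Replace with your tool code.
    # Usually connection contains configs to connect to an API.
    return "Hello " + input_text
```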
diff --git a/src/promptflow/promptflow/_cli/data/standard_flow/flow.dag.yaml b/src/promptflow/promptflow/_cli/data/standard_flow/flow.dag.yaml index 90a57651d0d..1415bf34958 100644 --- a/src/promptflow/promptflow/_cli/data/standard_flow/flow.dag.yaml +++ b/src/promptflow/promptflow/_cli/data/standard_flow/flow.dag.yaml @@ -1,3 +1,4 @@ +$schema: https://azuremlschemas.azureedge.net/promptflow/latest/Flow.schema.json inputs: text: type: string diff --git a/src/promptflow/promptflow/_core/tool.py b/src/promptflow/promptflow/_core/tool.py index e9212c62b0c..f77521d0287 100644 --- a/src/promptflow/promptflow/_core/tool.py +++ b/src/promptflow/promptflow/_core/tool.py @@ -45,6 +45,7 @@ def tool( name: str = None, description: str = None, type: str = None, + **kwargs, ) -> Callable: """Decorator for tool functions. The decorated function will be registered as a tool and can be used in a flow. @@ -73,6 +74,7 @@ def new_f(*args, **kwargs): new_f.__name = name new_f.__description = description new_f.__type = type + new_f.__extra_info = kwargs return new_f # enable use decorator without "()" if all arguments are default values diff --git a/src/promptflow/promptflow/_core/tool_meta_generator.py b/src/promptflow/promptflow/_core/tool_meta_generator.py index eeff42dafce..d8f4ae599b2 100644 --- a/src/promptflow/promptflow/_core/tool_meta_generator.py +++ b/src/promptflow/promptflow/_core/tool_meta_generator.py @@ -120,13 +120,23 @@ def collect_tool_methods_in_module(m): return tools -def _parse_tool_from_function(f, gen_custom_type_conn=False): +def collect_tool_methods_with_init_inputs_in_module(m): + tools = [] + for _, obj in inspect.getmembers(m): + if isinstance(obj, type) and issubclass(obj, ToolProvider) and obj.__module__ == m.__name__: + for _, method in inspect.getmembers(obj): + if is_tool(method): + tools.append((method, obj.get_initialize_inputs())) + return tools + + +def _parse_tool_from_function(f, initialize_inputs=None, gen_custom_type_conn=False): if hasattr(f, "__tool") and isinstance(f.__tool, Tool): return f.__tool if hasattr(f, "__original_function"): f = f.__original_function try: - inputs, _, _ = function_to_interface(f, gen_custom_type_conn=gen_custom_type_conn) + inputs, _, _ = function_to_interface(f, initialize_inputs, gen_custom_type_conn=gen_custom_type_conn) except Exception as e: error_type_and_message = f"({e.__class__.__name__}) {e}" raise BadFunctionInterface( @@ -200,31 +210,36 @@ def load_python_module(content, source=None): def collect_tool_function_in_module(m): - tools = collect_tool_functions_in_module(m) - if len(tools) == 0: + tool_functions = collect_tool_functions_in_module(m) + tool_methods = collect_tool_methods_with_init_inputs_in_module(m) + num_tools = len(tool_functions) + len(tool_methods) + if num_tools == 0: raise NoToolDefined( message_format=( "No tool found in the python script. " "Please make sure you have one and only one tool definition in your script." ) ) - elif len(tools) > 1: - tool_names = ", ".join(t.__name__ for t in tools) + elif num_tools > 1: + tool_names = ", ".join(t.__name__ for t in tool_functions + tool_methods) raise MultipleToolsDefined( message_format=( "Expected 1 but collected {tool_count} tools: {tool_names}. " "Please make sure you have one and only one tool definition in your script." 
), - tool_count=len(tools), + tool_count=num_tools, tool_names=tool_names, ) - return tools[0] + if tool_functions: + return tool_functions[0], None + else: + return tool_methods[0] def generate_python_tool(name, content, source=None): m = load_python_module(content, source) - f = collect_tool_function_in_module(m) - tool = _parse_tool_from_function(f) + f, initialize_inputs = collect_tool_function_in_module(m) + tool = _parse_tool_from_function(f, initialize_inputs) tool.module = None if name is not None: tool.name = name diff --git a/src/promptflow/promptflow/_core/tools_manager.py b/src/promptflow/promptflow/_core/tools_manager.py index bb66e4195a4..7a0d44fe097 100644 --- a/src/promptflow/promptflow/_core/tools_manager.py +++ b/src/promptflow/promptflow/_core/tools_manager.py @@ -233,15 +233,25 @@ def load_builtin( tool: Tool, node_inputs: Optional[dict] = None, ) -> Tuple[Callable, dict]: - return BuiltinsManager._load_package_tool(tool.name, tool.module, tool.class_name, tool.function, node_inputs) + return BuiltinsManager._load_package_tool( + tool.name, tool.module, tool.class_name, tool.function, node_inputs + ) + + @staticmethod + def _load_package_tool(tool_name, module_name, class_name, method_name, node_inputs): + module = importlib.import_module(module_name) + return BuiltinsManager._load_tool_from_module( + module, tool_name, module_name, class_name, method_name, node_inputs + ) @staticmethod - def _load_package_tool(tool_name, module_name, class_name, method_name, node_inputs: Mapping[str, InputAssignment]): - """Load package in tool with given import path and node inputs.""" - m = importlib.import_module(module_name) + def _load_tool_from_module( + module, tool_name, module_name, class_name, method_name, node_inputs: Mapping[str, InputAssignment] + ): + """Load tool from given module with node inputs.""" if class_name is None: - return getattr(m, method_name), {} - provider_class = getattr(m, class_name) + return getattr(module, method_name), {} + provider_class = getattr(module, class_name) # Note: v -- type is InputAssignment init_inputs = provider_class.get_initialize_inputs() init_inputs_values = {} @@ -392,7 +402,7 @@ def load_tool_for_node(self, node: Node) -> Tool: if node.source.type == ToolSourceType.Package: return self.load_tool_for_package_node(node) elif node.source.type == ToolSourceType.Code: - _, _, tool = self.load_tool_for_script_node(node) + _, tool = self.load_tool_for_script_node(node) return tool raise NotImplementedError(f"Tool source type {node.source.type} for python tool is not supported yet.") elif node.type == ToolType.CUSTOM_LLM: @@ -418,8 +428,8 @@ def load_tool_for_script_node(self, node: Node) -> Tuple[types.ModuleType, Calla m = load_python_module_from_file(self._working_dir / path) if m is None: raise CustomToolSourceLoadError(f"Cannot load module from {path}.") - f = collect_tool_function_in_module(m) - return m, f, _parse_tool_from_function(f, gen_custom_type_conn=True) + f, init_inputs = collect_tool_function_in_module(m) + return m, _parse_tool_from_function(f, init_inputs, gen_custom_type_conn=True) def load_tool_for_llm_node(self, node: Node) -> Tool: api_name = f"{node.provider}.{node.api}" diff --git a/src/promptflow/promptflow/_sdk/_load_functions.py b/src/promptflow/promptflow/_sdk/_load_functions.py index dde327ddd77..51f5b66100f 100644 --- a/src/promptflow/promptflow/_sdk/_load_functions.py +++ b/src/promptflow/promptflow/_sdk/_load_functions.py @@ -64,15 +64,37 @@ def load_flow( source: Union[str, PathLike, IO[AnyStr]], 
**kwargs, ): + """Load flow from YAML file. + + :param source: The local yaml source of a flow. Must be a path to a local file. + If the source is a path, it will be opened and read. + An exception is raised if the file does not exist. + :type source: Union[PathLike, str] + :return: A Flow object + :rtype: Flow + """ return ProtectedFlow.load(source, **kwargs) def load_run( source: Union[str, PathLike, IO[AnyStr]], + params_override: Optional[list] = None, **kwargs, ): + """Load run from YAML file. + + :param source: The local yaml source of a run. Must be a path to a local file. + If the source is a path, it will be opened and read. + An exception is raised if the file does not exist. + :type source: Union[PathLike, str] + :param params_override: Fields to overwrite on top of the yaml file. + Format is [{"field1": "value1"}, {"field2": "value2"}] + :type params_override: List[Dict] + :return: A Run object + :rtype: Run + """ data = load_yaml(source=source) - return Run._load(data=data, yaml_path=source, **kwargs) + return Run._load(data=data, yaml_path=source, params_override=params_override, **kwargs) def load_connection( diff --git a/src/promptflow/promptflow/_sdk/data/executable/main.py.jinja2 b/src/promptflow/promptflow/_sdk/data/executable/main.py.jinja2 index ad006581f10..92df88619b8 100644 --- a/src/promptflow/promptflow/_sdk/data/executable/main.py.jinja2 +++ b/src/promptflow/promptflow/_sdk/data/executable/main.py.jinja2 @@ -127,13 +127,19 @@ def start(): def submit(**kwargs) -> None: st.session_state.messages.append(("user", kwargs)) - st.session_state.history.append({"input": kwargs}) + session_state_history = dict() + session_state_history.update({"inputs": kwargs}) with container: render_message("user", kwargs) # Force append chat history to kwargs - response = run_flow({"chat_history": get_chat_history_from_session(), **kwargs}) +{% if is_chat_flow %} +{{ ' ' * indent_level * 2 }}response = run_flow({'{{chat_history_input_name}}': get_chat_history_from_session(), **kwargs}) +{% else %} +{{ ' ' * indent_level * 2 }}response = run_flow(kwargs) +{% endif %} st.session_state.messages.append(("assistant", response)) - st.session_state.history.append({"output": kwargs}) + session_state_history.update({"outputs": response}) + st.session_state.history.append(session_state_history) with container: render_message("assistant", response) @@ -209,15 +215,15 @@ def start(): os.environ[environment_variable] = secret_input {% for flow_input, (default_value, value_type) in flow_inputs.items() %} -{% if flow_input != "chat_history" %} {% if value_type == "list" %} {{ ' ' * indent_level * 2 }}st.text('{{flow_input}}') {{ ' ' * indent_level * 2 }}{{flow_input}} = st_quill(html=True, toolbar=["image"], key='{{flow_input}}') {% elif value_type == "image" %} {{ ' ' * indent_level * 2 }}{{flow_input}} = st.file_uploader(label='{{flow_input}}') -{% else %} +{% elif value_type == "string" %} {{ ' ' * indent_level * 2 }}{{flow_input}} = st.text_input(label='{{flow_input}}', placeholder='{{default_value}}') -{% endif %} +{% else %} +{{ ' ' * indent_level * 2 }}{{flow_input}} = st.text_input(label='{{flow_input}}', placeholder={{default_value}}) {% endif %} {% endfor %} diff --git a/src/promptflow/promptflow/_sdk/operations/_flow_operations.py b/src/promptflow/promptflow/_sdk/operations/_flow_operations.py index 570d389c6c2..a3d025fec34 100644 --- a/src/promptflow/promptflow/_sdk/operations/_flow_operations.py +++ b/src/promptflow/promptflow/_sdk/operations/_flow_operations.py @@ -377,7 +377,8
@@ def _build_as_executable( import streamlit_quill # noqa: F401 except ImportError as ex: raise UserErrorException( - f"Please install PyInstaller, streamlit and streamlit_quill for building " f"executable, {ex.msg}." + f"Please install PyInstaller, streamlit and streamlit_quill for building " f"executable, {ex.msg}. " + f"You can try 'pip install promptflow[executable]' to install them." ) from promptflow.contracts.flow import Flow as ExecutableFlow @@ -400,10 +401,12 @@ def _build_as_executable( runtime_interpreter_path = (Path(streamlit.__file__).parent / "runtime").as_posix() executable = ExecutableFlow.from_yaml(flow_file=Path(flow_dag_path.name), working_dir=flow_dag_path.parent) - flow_inputs = {flow_input: (value.default, value.type.value) for flow_input, value in executable.inputs.items()} + flow_inputs = {flow_input: (value.default, value.type.value) for flow_input, value in executable.inputs.items() + if not value.is_chat_history} flow_inputs_params = ["=".join([flow_input, flow_input]) for flow_input, _ in flow_inputs.items()] flow_inputs_params = ",".join(flow_inputs_params) + is_chat_flow, chat_history_input_name, _ = self._is_chat_flow(executable) copy_tree_respect_template_and_ignore_file( source=Path(__file__).parent.parent / "data" / "executable", target=output_dir, @@ -414,17 +417,16 @@ def _build_as_executable( "flow_inputs": flow_inputs, "flow_inputs_params": flow_inputs_params, "flow_path": None, + "is_chat_flow": is_chat_flow, + "chat_history_input_name": chat_history_input_name }, ) - try: - current_directory = os.getcwd() - os.chdir(output_dir.as_posix()) + self._run_pyinstaller(output_dir) + + def _run_pyinstaller(self, output_dir): + with _change_working_dir(output_dir, mkdir=False): subprocess.run(["pyinstaller", "app.spec"], check=True) print("PyInstaller command executed successfully.") - except subprocess.CalledProcessError as e: - print(f"Error running PyInstaller: {e}") - finally: - os.chdir(current_directory) @monitor_operation(activity_name="pf.flows.build", activity_type=ActivityType.PUBLICAPI) def build( diff --git a/src/promptflow/promptflow/_sdk/operations/_local_storage_operations.py b/src/promptflow/promptflow/_sdk/operations/_local_storage_operations.py index f9c916fcbe1..aab1ea13312 100644 --- a/src/promptflow/promptflow/_sdk/operations/_local_storage_operations.py +++ b/src/promptflow/promptflow/_sdk/operations/_local_storage_operations.py @@ -36,6 +36,7 @@ from promptflow.contracts.run_info import RunInfo as NodeRunInfo from promptflow.contracts.run_info import Status from promptflow.contracts.run_mode import RunMode +from promptflow.executor._result import LineResult from promptflow.executor.flow_executor import BulkResult from promptflow.storage import AbstractRunStorage @@ -75,15 +76,18 @@ def get_initializer(self): def __enter__(self): log_path = Path(self.log_path) log_path.parent.mkdir(parents=True, exist_ok=True) - if log_path.exists(): - # Clean up previous log content - try: - with open(log_path, mode="w", encoding=DEFAULT_ENCODING) as file: - file.truncate(0) - except Exception as e: - logger.warning(f"Failed to clean up the previous log content because {e}") - else: + if self.run_mode == RunMode.Batch: log_path.touch(exist_ok=True) + else: + if log_path.exists(): + # for non batch run, clean up previous log content + try: + with open(log_path, mode="w", encoding=DEFAULT_ENCODING) as file: + file.truncate(0) + except Exception as e: + logger.warning(f"Failed to clean up the previous log content because {e}") + else: + log_path.touch() 
for _logger in self._get_execute_loggers_list(): for handler in _logger.handlers: @@ -239,7 +243,14 @@ def load_io_spec(self) -> Tuple[Dict[str, Dict[str, str]], Dict[str, Dict[str, s flow_dag = yaml.safe_load(f) return flow_dag["inputs"], flow_dag["outputs"] - def dump_inputs(self, inputs: RunInputs) -> None: + def dump_inputs(self, line_results: List[LineResult]) -> None: + inputs = [] + for line_result in line_results: + try: + inputs.append(line_result.run_info.inputs) + except Exception: + # ignore when single line doesn't have inputs + pass df = pd.DataFrame(inputs) with open(self._inputs_path, mode="w", encoding=DEFAULT_ENCODING) as f: # policy: http://policheck.azurewebsites.net/Pages/TermInfo.aspx?LCID=9&TermID=203588 @@ -389,6 +400,7 @@ def persist_result(self, result: Optional[BulkResult]) -> None: return self.dump_outputs(result.outputs) self.dump_metrics(result.metrics) + self.dump_inputs(result.line_results) @staticmethod def _prepare_folder(path: Union[str, Path]) -> Path: diff --git a/src/promptflow/promptflow/_sdk/operations/_run_submitter.py b/src/promptflow/promptflow/_sdk/operations/_run_submitter.py index c98b32666fa..14058fc938b 100644 --- a/src/promptflow/promptflow/_sdk/operations/_run_submitter.py +++ b/src/promptflow/promptflow/_sdk/operations/_run_submitter.py @@ -330,8 +330,7 @@ def _submit_bulk_run(self, flow: Flow, run: Run, local_storage: LocalStorageOper # persist snapshot and result # snapshot: flow directory and (mapped) inputs local_storage.dump_snapshot(flow) - local_storage.dump_inputs(mapped_inputs) - # result: outputs and metrics + # persist inputs, outputs and metrics local_storage.persist_result(bulk_result) # exceptions local_storage.dump_exception(exception=exception, bulk_results=bulk_result) diff --git a/src/promptflow/promptflow/_sdk/operations/_tool_operations.py b/src/promptflow/promptflow/_sdk/operations/_tool_operations.py index a4842b50db7..ec971546534 100644 --- a/src/promptflow/promptflow/_sdk/operations/_tool_operations.py +++ b/src/promptflow/promptflow/_sdk/operations/_tool_operations.py @@ -2,14 +2,18 @@ # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- import inspect +import io import json from dataclasses import asdict from os import PathLike +from pathlib import Path from typing import Union from promptflow._core.tool_meta_generator import is_tool from promptflow._core.tools_manager import collect_package_tools +from promptflow._utils.multimedia_utils import convert_multimedia_data_to_base64 from promptflow._utils.tool_utils import function_to_interface +from promptflow.contracts.multimedia import Image from promptflow.contracts.tool import Tool, ToolType from promptflow.exceptions import UserErrorException @@ -20,15 +24,13 @@ class ToolOperations: def generate_tool_meta(self, tool_module): tool_functions = self._collect_tool_functions_in_module(tool_module) tool_methods = self._collect_tool_class_methods_in_module(tool_module) - tools = [self._parse_tool_from_function(f) for f in tool_functions] + [ - self._parse_tool_from_function(f, initialize_inputs) for (f, initialize_inputs) in tool_methods - ] - construct_tools = { - f"{t.module}.{t.class_name}.{t.function}" - if t.class_name is not None - else f"{t.module}.{t.function}": asdict(t, dict_factory=lambda x: {k: v for (k, v) in x if v}) - for t in tools - } + construct_tools = {} + for f in tool_functions: + tool_name, construct_tool = self._serialize_tool(f) + construct_tools[tool_name] = construct_tool + for (f, initialize_inputs) in tool_methods: + tool_name, construct_tool = self._serialize_tool(f, initialize_inputs) + construct_tools[tool_name] = construct_tool # The generated dict cannot be dumped as yaml directly since yaml cannot handle string enum. return json.loads(json.dumps(construct_tools)) @@ -62,6 +64,7 @@ def _parse_tool_from_function(f, initialize_inputs=None): tool_type = getattr(f, "__type") or ToolType.PYTHON tool_name = getattr(f, "__name") description = getattr(f, "__description") + extra_info = getattr(f, "__extra_info") if getattr(f, "__tool", None) and isinstance(f.__tool, Tool): return getattr(f, "__tool") if hasattr(f, "__original_function"): @@ -74,7 +77,7 @@ def _parse_tool_from_function(f, initialize_inputs=None): if "." in f.__qualname__: class_name = f.__qualname__.replace(f".{f.__name__}", "") # Construct the Tool structure - return Tool( + tool = Tool( name=tool_name or f.__qualname__, description=description or inspect.getdoc(f), inputs=inputs, @@ -83,6 +86,52 @@ def _parse_tool_from_function(f, initialize_inputs=None): function=f.__name__, module=f.__module__, ) + return tool, extra_info + + def _serialize_tool(self, tool_func, initialize_inputs=None): + """ + Serialize tool obj to dict. 
+ + :param tool_func: Package tool function + :type tool_func: callable + :param initialize_inputs: Initialize inputs of package tool + :type initialize_inputs: Dict[str, obj] + :return: package tool name, serialized tool + :rtype: str, Dict[str, str] + """ + tool, extra_info = self._parse_tool_from_function(tool_func, initialize_inputs) + tool_name = ( + f"{tool.module}.{tool.class_name}.{tool.function}" + if tool.class_name is not None + else f"{tool.module}.{tool.function}" + ) + construct_tool = asdict(tool, dict_factory=lambda x: {k: v for (k, v) in x if v}) + if extra_info: + if "icon" in extra_info: + if not Path(extra_info["icon"]).exists(): + raise UserErrorException(f"Cannot find the icon path {extra_info['icon']}.") + extra_info["icon"] = self._serialize_image_data(extra_info["icon"]) + construct_tool.update(extra_info) + return tool_name, construct_tool + + @staticmethod + def _serialize_image_data(image_path): + """Serialize image to base64.""" + from PIL import Image as PIL_Image + + with open(image_path, "rb") as image_file: + # Create a BytesIO object from the image file + image_data = io.BytesIO(image_file.read()) + + # Open the image and resize it + img = PIL_Image.open(image_data) + if img.size != (16, 16): + img = img.resize((16, 16), PIL_Image.Resampling.LANCZOS) + buffered = io.BytesIO() + img.save(buffered, format="PNG") + icon_image = Image(buffered.getvalue(), mime_type="image/png") + image_url = convert_multimedia_data_to_base64(icon_image, with_type=True) + return image_url def list( self, diff --git a/src/promptflow/promptflow/_utils/connection_utils.py b/src/promptflow/promptflow/_utils/connection_utils.py index 5ea3cb6ff4d..00f16d83e1d 100644 --- a/src/promptflow/promptflow/_utils/connection_utils.py +++ b/src/promptflow/promptflow/_utils/connection_utils.py @@ -116,9 +116,9 @@ def extract_comments_mapping(keys, doc): type_pattern = rf":type {key}: (.*)" key_type = " ".join(re.findall(type_pattern, doc)).rstrip(".") if key_type and key_description: - comments_map[key] = ", ".join([key_type, key_description]) + comments_map[key] = " ".join([key_type + " type.", key_description]) elif key_type: - comments_map[key] = key_type + comments_map[key] = key_type + " type." 
elif key_description: comments_map[key] = key_description except re.error: diff --git a/src/promptflow/promptflow/_utils/multimedia_utils.py b/src/promptflow/promptflow/_utils/multimedia_utils.py index 96821298630..247fd083913 100644 --- a/src/promptflow/promptflow/_utils/multimedia_utils.py +++ b/src/promptflow/promptflow/_utils/multimedia_utils.py @@ -31,7 +31,7 @@ def _get_extension_from_mime_type(mime_type: str): return ext -def _is_multimedia_dict(multimedia_dict: dict): +def is_multimedia_dict(multimedia_dict: dict): if len(multimedia_dict) != 1: return False key = list(multimedia_dict.keys())[0] @@ -96,7 +96,7 @@ def _create_image_from_dict(image_dict: dict): for k, v in image_dict.items(): format, resource = _get_multimedia_info(k) if resource == "path": - return _create_image_from_file(v, mime_type=f"image/{format}") + return _create_image_from_file(Path(v), mime_type=f"image/{format}") elif resource == "base64": return _create_image_from_base64(v, mime_type=f"image/{format}") elif resource == "url": @@ -122,7 +122,7 @@ def create_image(value: any): if isinstance(value, PFBytes): return value elif isinstance(value, dict): - if _is_multimedia_dict(value): + if is_multimedia_dict(value): return _create_image_from_dict(value) else: raise InvalidImageInput( @@ -176,10 +176,7 @@ def persist_multimedia_data(value: Any, base_dir: Path, sub_dir: Path = None): def convert_multimedia_data_to_base64(value: Any, with_type=False): - func = ( - lambda x: f"data:{x._mime_type};base64," + PFBytes.to_base64(x) if with_type else PFBytes.to_base64 - ) # noqa: E731 - to_base64_funcs = {PFBytes: func} + to_base64_funcs = {PFBytes: partial(PFBytes.to_base64, **{"with_type": with_type})} return recursive_process(value, process_funcs=to_base64_funcs) @@ -210,7 +207,7 @@ def load_multimedia_data_recursively(value: Any): if isinstance(value, list): return [load_multimedia_data_recursively(item) for item in value] elif isinstance(value, dict): - if _is_multimedia_dict(value): + if is_multimedia_dict(value): return _create_image_from_dict(value) else: return {k: load_multimedia_data_recursively(v) for k, v in value.items()} diff --git a/src/promptflow/promptflow/azure/_constants/_flow.py b/src/promptflow/promptflow/azure/_constants/_flow.py index 6f01077bc9b..01876c78262 100644 --- a/src/promptflow/promptflow/azure/_constants/_flow.py +++ b/src/promptflow/promptflow/azure/_constants/_flow.py @@ -21,6 +21,7 @@ class FlowJobType: CLOUD_RUNS_PAGE_SIZE = 25 # align with UX SESSION_CREATION_TIMEOUT_SECONDS = 10 * 60 # 10 minutes +SESSION_CREATION_TIMEOUT_ENV_VAR = "PROMPTFLOW_SESSION_CREATION_TIMEOUT_SECONDS" PYTHON_REQUIREMENTS_TXT = "python_requirements_txt" BASE_IMAGE = "image" diff --git a/src/promptflow/promptflow/azure/_restclient/flow_service_caller.py b/src/promptflow/promptflow/azure/_restclient/flow_service_caller.py index 4f11ec2d44d..5fb95ea77c9 100644 --- a/src/promptflow/promptflow/azure/_restclient/flow_service_caller.py +++ b/src/promptflow/promptflow/azure/_restclient/flow_service_caller.py @@ -14,7 +14,7 @@ from azure.core.pipeline.policies import RetryPolicy from promptflow._telemetry.telemetry import TelemetryMixin -from promptflow.azure._constants._flow import AUTOMATIC_RUNTIME +from promptflow.azure._constants._flow import AUTOMATIC_RUNTIME, SESSION_CREATION_TIMEOUT_ENV_VAR from promptflow.azure._restclient.flow import AzureMachineLearningDesignerServiceClient from promptflow.exceptions import ValidationException, UserErrorException, PromptflowException @@ -500,11 +500,24 @@ def 
create_flow_session( sleep_period = 5 status = None timeout_seconds = SESSION_CREATION_TIMEOUT_SECONDS + # polling timeout: if the user set the timeout via the environment variable, use that value + if os.environ.get(SESSION_CREATION_TIMEOUT_ENV_VAR): + try: + timeout_seconds = float(os.environ.get(SESSION_CREATION_TIMEOUT_ENV_VAR)) + except ValueError: + raise UserErrorException( + "Environment variable {} is set to {} but could not be parsed. " + "Please reset it to a number.".format( + SESSION_CREATION_TIMEOUT_ENV_VAR, os.environ.get(SESSION_CREATION_TIMEOUT_ENV_VAR) + ) + ) # InProgress is only known non-terminal status for now. while status in [None, "InProgress"]: if time_run + sleep_period > timeout_seconds: - message = f"Timeout for session {action} {session_id} for {AUTOMATIC_RUNTIME}.\n" \ - "Please resubmit the flow later." + message = f"Polling timeout for session {session_id} {action} " \ + f"for {AUTOMATIC_RUNTIME} after {timeout_seconds} seconds.\n" \ + f"To proceed with the {action} for {AUTOMATIC_RUNTIME}, you can retry with the same flow, " \ + "and we will continue polling the status of the previous session. \n" raise Exception(message) time_run += sleep_period time.sleep(sleep_period) diff --git a/src/promptflow/promptflow/azure/operations/_artifact_utilities.py b/src/promptflow/promptflow/azure/operations/_artifact_utilities.py index 0a1cf2ec51a..b93375ff1b6 100644 --- a/src/promptflow/promptflow/azure/operations/_artifact_utilities.py +++ b/src/promptflow/promptflow/azure/operations/_artifact_utilities.py @@ -356,6 +356,56 @@ def _update_gen2_metadata(name, version, indicator_file, storage_client) -> None T = TypeVar("T", bound=Artifact) +def _check_and_upload_path( + artifact: T, + asset_operations: Union["DataOperations", "ModelOperations", "CodeOperations", "FeatureSetOperations"], + artifact_type: str, + datastore_name: Optional[str] = None, + sas_uri: Optional[str] = None, + show_progress: bool = True, +): + """Checks whether `artifact` is a path or a URI and uploads it to the datastore if necessary.
+ :param artifact: The artifact to check and upload. + :type artifact: T + :param asset_operations: The asset operations to use for uploading. + :type asset_operations: Union["DataOperations", "ModelOperations", "CodeOperations", "FeatureSetOperations"] + :param datastore_name: The name of the datastore to upload to. + :type datastore_name: Optional[str] + :param sas_uri: The SAS URI to use for uploading. + :type sas_uri: Optional[str] + """ + from azure.ai.ml._utils.utils import is_mlflow_uri, is_url + + datastore_name = artifact.datastore + if ( + hasattr(artifact, "local_path") + and artifact.local_path is not None + or ( + hasattr(artifact, "path") + and artifact.path is not None + and not (is_url(artifact.path) or is_mlflow_uri(artifact.path)) + ) + ): + path = ( + Path(artifact.path) + if hasattr(artifact, "path") and artifact.path is not None + else Path(artifact.local_path) + ) + if not path.is_absolute(): + path = Path(artifact.base_path, path).resolve() + uploaded_artifact = _upload_to_datastore( + asset_operations._operation_scope, + asset_operations._datastore_operation, + path, + datastore_name=datastore_name, + asset_name=artifact.name, + asset_version=str(artifact.version), + asset_hash=artifact._upload_hash if hasattr(artifact, "_upload_hash") else None, + sas_uri=sas_uri, + artifact_type=artifact_type, + show_progress=show_progress, + ignore_file=getattr(artifact, "_ignore_file", None), + ) + return uploaded_artifact + + def _check_and_upload_env_build_context( environment: Environment, operations: "EnvironmentOperations", diff --git a/src/promptflow/promptflow/azure/operations/_flow_operations.py b/src/promptflow/promptflow/azure/operations/_flow_operations.py index debb642fffb..0e6fb8ccce1 100644 --- a/src/promptflow/promptflow/azure/operations/_flow_operations.py +++ b/src/promptflow/promptflow/azure/operations/_flow_operations.py @@ -179,3 +179,52 @@ def _try_resolve_code_for_flow(cls, flow: Flow, ops: OperationOrchestrator, igno else: raise flow._code_uploaded = True + + # region deprecated but keep for runtime test dependencies + def _resolve_arm_id_or_upload_dependencies_to_file_share(self, flow: Flow) -> None: + ops = OperationOrchestrator(self._all_operations, self._operation_scope, self._operation_config) + # resolve flow's code + self._try_resolve_code_for_flow_to_file_share(flow=flow, ops=ops) + + @classmethod + def _try_resolve_code_for_flow_to_file_share(cls, flow: Flow, ops: OperationOrchestrator) -> None: + from azure.ai.ml._utils._storage_utils import AzureMLDatastorePathUri + + from promptflow.azure._constants._flow import DEFAULT_STORAGE + + from ._artifact_utilities import _check_and_upload_path + + if flow.path: + if flow.path.startswith("azureml://datastores"): + # remote path + + path_uri = AzureMLDatastorePathUri(flow.path) + if path_uri.datastore != DEFAULT_STORAGE: + raise ValueError(f"Only {DEFAULT_STORAGE} is supported as remote storage for now.") + flow.path = path_uri.path + flow._code_uploaded = True + return + else: + raise ValueError("Path is required for flow.") + + with flow._build_code() as code: + if code is None: + return + if flow._code_uploaded: + return + code.datastore = DEFAULT_STORAGE + uploaded_code_asset = _check_and_upload_path( + artifact=code, + asset_operations=ops._code_assets, + artifact_type="Code", + show_progress=False, + ) + if "remote_path" in uploaded_code_asset: + path = uploaded_code_asset["remote_path"] + elif "remote path" in uploaded_code_asset: + path = uploaded_code_asset["remote path"] + flow.code = path + flow.path = (Path(path) / flow.path).as_posix() + flow._code_uploaded = True + + # endregion diff --git
a/src/promptflow/promptflow/contracts/multimedia.py b/src/promptflow/promptflow/contracts/multimedia.py index a7de51581c0..0218a0f450d 100644 --- a/src/promptflow/promptflow/contracts/multimedia.py +++ b/src/promptflow/promptflow/contracts/multimedia.py @@ -21,9 +21,10 @@ def __init__(self, data: bytes, mime_type: str): self._hash = hashlib.sha1(data).hexdigest()[:8] self._mime_type = mime_type.lower() - def to_base64(self): + def to_base64(self, with_type: bool = False): """Returns the base64 representation of the PFBytes.""" - + if with_type: + return f"data:{self._mime_type};base64," + base64.b64encode(self).decode("utf-8") return base64.b64encode(self).decode("utf-8") diff --git a/src/promptflow/promptflow/executor/_tool_resolver.py b/src/promptflow/promptflow/executor/_tool_resolver.py index 438f6540cf6..11110b50db2 100644 --- a/src/promptflow/promptflow/executor/_tool_resolver.py +++ b/src/promptflow/promptflow/executor/_tool_resolver.py @@ -256,7 +256,7 @@ def _resolve_llm_connection_to_inputs(self, node: Node, tool: Tool) -> Node: ) def _resolve_script_node(self, node: Node, convert_input_types=False) -> ResolvedTool: - m, f, tool = self._tool_loader.load_tool_for_script_node(node) + m, tool = self._tool_loader.load_tool_for_script_node(node) # We only want to load script tool module once. # Reloading the same module changes the ID of the class, which can cause issues with isinstance() checks. # This is important when working with connection class checks. For instance, in user tool script it writes: @@ -267,7 +267,11 @@ def _resolve_script_node(self, node: Node, convert_input_types=False) -> Resolve # To avoid reloading, pass the loaded module to _convert_node_literal_input_types as an arg. if convert_input_types: node = self._convert_node_literal_input_types(node, tool, m) - return ResolvedTool(node=node, definition=tool, callable=f, init_args={}) + callable, init_args = BuiltinsManager._load_tool_from_module( + m, tool.name, tool.module, tool.class_name, tool.function, node.inputs + ) + self._remove_init_args(node.inputs, init_args) + return ResolvedTool(node=node, definition=tool, callable=callable, init_args=init_args) def _resolve_package_node(self, node: Node, convert_input_types=False) -> ResolvedTool: tool: Tool = self._tool_loader.load_tool_for_package_node(node) diff --git a/src/promptflow/setup.py b/src/promptflow/setup.py index cb429033a4c..6d1a4783f93 100644 --- a/src/promptflow/setup.py +++ b/src/promptflow/setup.py @@ -47,6 +47,7 @@ "opencensus-ext-azure<2.0.0", # configure opencensus to send telemetry to azure monitor "ruamel.yaml>=0.17.35,<0.18.0", # used to generate connection templates with preserved comments "pyarrow>=13.0.0,<14.0.0", # used to read parquet file with pandas.read_parquet + "pillow>=10.1.0,<11.0.0", # used to generate icon data URI for package tool ] setup( @@ -80,7 +81,7 @@ "azure-ai-ml>=1.11.0,<2.0.0", "pyjwt>=2.4.0,<3.0.0", # requirement of control plane SDK ], - "executable": ["pyinstaller", "streamlit>=1.26.0", "streamlit-quill<0.1.0"], + "executable": ["pyinstaller>=5.13.2", "streamlit>=1.26.0", "streamlit-quill<0.1.0"], }, packages=find_packages(), entry_points={ diff --git a/src/promptflow/tests/executor/e2etests/test_executor_happypath.py b/src/promptflow/tests/executor/e2etests/test_executor_happypath.py index 216f29333e2..02cf21bdfef 100644 --- a/src/promptflow/tests/executor/e2etests/test_executor_happypath.py +++ b/src/promptflow/tests/executor/e2etests/test_executor_happypath.py @@ -1,7 +1,5 @@ import uuid -import os from types 
import GeneratorType -from pathlib import Path import pytest @@ -13,7 +11,6 @@ from promptflow.executor import FlowExecutor from promptflow.executor._errors import ConnectionNotFound, InputTypeError, ResolveToolError from promptflow.executor.flow_executor import BulkResult, LineResult -from promptflow.storage._run_storage import DefaultRunStorage from promptflow.storage import AbstractRunStorage from ..utils import ( @@ -22,7 +19,6 @@ get_flow_expected_status_summary, get_flow_sample_inputs, get_yaml_file, - get_yaml_working_dir ) SAMPLE_FLOW = "web_classification_no_variants" @@ -31,11 +27,6 @@ SAMPLE_FLOW_WITH_LANGCHAIN_TRACES = "flow_with_langchain_traces" -def assert_contains_substrings(s, substrings): - for substring in substrings: - assert substring in s - - class MemoryRunStorage(AbstractRunStorage): def __init__(self): self._node_runs = {} @@ -231,38 +222,6 @@ def test_executor_exec_line(self, flow_folder, dev_connections): assert node_run_info.node == node assert isinstance(node_run_info.api_calls, list) # api calls is set - @pytest.mark.parametrize( - "flow_folder", - [ - "python_tool_with_multiple_image_nodes" - ], - ) - def test_executor_exec_line_with_image(self, flow_folder, dev_connections): - self.skip_serp(flow_folder, dev_connections) - working_dir = get_yaml_working_dir(flow_folder) - os.chdir(working_dir) - storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp")) - executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, storage=storage) - flow_result = executor.exec_line({}) - assert not executor._run_tracker._flow_runs, "Flow runs in run tracker should be empty." - assert not executor._run_tracker._node_runs, "Node runs in run tracker should be empty." - assert isinstance(flow_result.output, dict) - assert flow_result.run_info.status == Status.Completed - node_count = len(executor._flow.nodes) - assert isinstance(flow_result.run_info.api_calls, list) and len(flow_result.run_info.api_calls) == node_count - substrings = ["data:image/jpg;path", ".jpg"] - for i in range(node_count): - assert_contains_substrings(str(flow_result.run_info.api_calls[i]), substrings) - assert len(flow_result.node_run_infos) == node_count - for node, node_run_info in flow_result.node_run_infos.items(): - assert node_run_info.status == Status.Completed - assert node_run_info.node == node - assert isinstance(node_run_info.api_calls, list) # api calls is set - assert_contains_substrings(str(node_run_info.inputs), substrings) - assert_contains_substrings(str(node_run_info.output), substrings) - assert_contains_substrings(str(node_run_info.result), substrings) - assert_contains_substrings(str(node_run_info.api_calls[0]), substrings) - @pytest.mark.parametrize( "flow_folder, node_name, flow_inputs, dependency_nodes_outputs", [ @@ -294,41 +253,6 @@ def test_executor_exec_node(self, flow_folder, node_name, flow_inputs, dependenc assert run_info.node == node_name assert run_info.system_metrics["duration"] >= 0 - @pytest.mark.parametrize( - "flow_folder, node_name, flow_inputs, dependency_nodes_outputs", - [ - ("python_tool_with_multiple_image_nodes", "python_node_2", {"logo_content": "Microsoft and four squares"}, - {"python_node": {"image": {"data:image/jpg;path": "logo.jpg"}, "image_name": "Microsoft's logo", - "image_list": [{"data:image/jpg;path": "logo.jpg"}]}}), - ("python_tool_with_multiple_image_nodes", "python_node", { - "image": "logo.jpg", "image_name": "Microsoft's logo"}, {},) - ], - ) - def test_executor_exec_node_with_image(self, flow_folder, node_name, 
flow_inputs, dependency_nodes_outputs, - dev_connections): - self.skip_serp(flow_folder, dev_connections) - yaml_file = get_yaml_file(flow_folder) - working_dir = get_yaml_working_dir(flow_folder) - os.chdir(working_dir) - run_info = FlowExecutor.load_and_exec_node( - yaml_file, - node_name, - flow_inputs=flow_inputs, - dependency_nodes_outputs=dependency_nodes_outputs, - connections=dev_connections, - output_sub_dir=("./temp"), - raise_ex=True, - ) - substrings = ["data:image/jpg;path", "temp", ".jpg"] - assert_contains_substrings(str(run_info.inputs), substrings) - assert_contains_substrings(str(run_info.output), substrings) - assert_contains_substrings(str(run_info.result), substrings) - assert_contains_substrings(str(run_info.api_calls[0]), substrings) - assert run_info.status == Status.Completed - assert isinstance(run_info.api_calls, list) - assert run_info.node == node_name - assert run_info.system_metrics["duration"] >= 0 - def test_executor_node_overrides(self, dev_connections): inputs = self.get_line_inputs() executor = FlowExecutor.create( @@ -467,3 +391,9 @@ def test_bulk_run_line_result(self, flow_folder, batch_input, expected_type, val validate_inputs=validate_inputs, ) assert type(bulk_result.line_results[0].run_info.inputs["text"]) is expected_type + + def test_executor_for_script_tool_with_init(self, dev_connections): + executor = FlowExecutor.create(get_yaml_file("script_tool_with_init"), dev_connections) + flow_result = executor.exec_line({"input": "World"}) + assert flow_result.run_info.status == Status.Completed + assert flow_result.output["output"] == "Hello World" diff --git a/src/promptflow/tests/executor/e2etests/test_executor_with_image.py b/src/promptflow/tests/executor/e2etests/test_executor_with_image.py new file mode 100644 index 00000000000..09ec9733052 --- /dev/null +++ b/src/promptflow/tests/executor/e2etests/test_executor_with_image.py @@ -0,0 +1,159 @@ +import os +from pathlib import Path + +import pytest + +from promptflow._utils.multimedia_utils import _create_image_from_file, is_multimedia_dict +from promptflow.contracts.multimedia import Image +from promptflow.contracts.run_info import Status +from promptflow.executor import FlowExecutor +from promptflow.storage._run_storage import DefaultRunStorage + +from ..utils import FLOW_ROOT, get_yaml_file, get_yaml_working_dir + +SIMPLE_IMAGE_FLOW = "python_tool_with_simple_image" +COMPOSITE_IMAGE_FLOW = "python_tool_with_composite_image" +CHAT_FLOW_WITH_IMAGE = "chat_flow_with_image" +SIMPLE_IMAGE_FLOW_PATH = FLOW_ROOT / SIMPLE_IMAGE_FLOW +COMPOSITE_IMAGE_FLOW_PATH = FLOW_ROOT / COMPOSITE_IMAGE_FLOW +CHAT_FLOW_WITH_IMAGE_PATH = FLOW_ROOT / CHAT_FLOW_WITH_IMAGE +IMAGE_URL = ( + "https://github.com/microsoft/promptflow/blob/93776a0631abf991896ab07d294f62082d5df3f3/src" + "/promptflow/tests/test_configs/datas/test_image.jpg?raw=true" +) + + +def get_test_cases_for_simple_input(): + image = _create_image_from_file(SIMPLE_IMAGE_FLOW_PATH / "logo.jpg") + inputs = [ + {"data:image/jpg;path": str(SIMPLE_IMAGE_FLOW_PATH / "logo.jpg")}, + {"data:image/jpg;base64": image.to_base64()}, + {"data:image/jpg;url": IMAGE_URL}, + str(SIMPLE_IMAGE_FLOW_PATH / "logo.jpg"), + image.to_base64(), + IMAGE_URL, + ] + return [(SIMPLE_IMAGE_FLOW, {"image": input}) for input in inputs] + + +def get_test_cases_for_composite_input(): + image_1 = _create_image_from_file(COMPOSITE_IMAGE_FLOW_PATH / "logo.jpg") + image_2 = _create_image_from_file(COMPOSITE_IMAGE_FLOW_PATH / "logo_2.png") + inputs = [ + [ + {"data:image/jpg;path": 
str(COMPOSITE_IMAGE_FLOW_PATH / "logo.jpg")},
+            {"data:image/png;path": str(COMPOSITE_IMAGE_FLOW_PATH / "logo_2.png")}
+        ],
+        [{"data:image/jpg;base64": image_1.to_base64()}, {"data:image/png;base64": image_2.to_base64()}],
+        [{"data:image/jpg;url": IMAGE_URL}, {"data:image/png;url": IMAGE_URL}],
+    ]
+    return [
+        (COMPOSITE_IMAGE_FLOW, {"image_list": input, "image_dict": {"image_1": input[0], "image_2": input[1]}})
+        for input in inputs
+    ]
+
+
+def get_test_cases_for_node_run():
+    image = {"data:image/jpg;path": str(SIMPLE_IMAGE_FLOW_PATH / "logo.jpg")}
+    simple_image_input = {"image": image}
+    image_list = [{"data:image/jpg;path": "logo.jpg"}, {"data:image/png;path": "logo_2.png"}]
+    image_dict = {
+        "image_dict": {
+            "image_1": {"data:image/jpg;path": "logo.jpg"},
+            "image_2": {"data:image/png;path": "logo_2.png"},
+        }
+    }
+    composite_image_input = {"image_list": image_list, "image_dict": image_dict}
+
+    return [
+        (SIMPLE_IMAGE_FLOW, "python_node", simple_image_input, None),
+        (SIMPLE_IMAGE_FLOW, "python_node_2", simple_image_input, {"python_node": image}),
+        (COMPOSITE_IMAGE_FLOW, "python_node", composite_image_input, None),
+        (COMPOSITE_IMAGE_FLOW, "python_node_2", composite_image_input, None),
+        (
+            COMPOSITE_IMAGE_FLOW, "python_node_3", composite_image_input,
+            {"python_node": image_list, "python_node_2": image_dict}
+        ),
+    ]
+
+
+def assert_contain_image_reference(value):
+    assert not isinstance(value, Image)
+    if isinstance(value, list):
+        for item in value:
+            assert_contain_image_reference(item)
+    elif isinstance(value, dict):
+        if is_multimedia_dict(value):
+            path = list(value.values())[0]
+            assert isinstance(path, str)
+            assert path.endswith(".jpg") or path.endswith(".jpeg") or path.endswith(".png")
+        else:
+            for _, v in value.items():
+                assert_contain_image_reference(v)
+
+
+def assert_contain_image_object(value):
+    if isinstance(value, list):
+        for item in value:
+            assert_contain_image_object(item)
+    elif isinstance(value, dict):
+        assert not is_multimedia_dict(value)
+        for _, v in value.items():
+            assert_contain_image_object(v)
+    else:
+        assert isinstance(value, Image)
+
+
+@pytest.mark.usefixtures("dev_connections")
+@pytest.mark.e2etest
+class TestExecutorWithImage:
+    @pytest.mark.parametrize(
+        "flow_folder, inputs", get_test_cases_for_simple_input() + get_test_cases_for_composite_input()
+    )
+    def test_executor_exec_line_with_image(self, flow_folder, inputs, dev_connections):
+        working_dir = get_yaml_working_dir(flow_folder)
+        os.chdir(working_dir)
+        storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
+        executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, storage=storage)
+        flow_result = executor.exec_line(inputs)
+        assert isinstance(flow_result.output, dict)
+        assert_contain_image_object(flow_result.output)
+        assert flow_result.run_info.status == Status.Completed
+        assert_contain_image_reference(flow_result.run_info)
+        for _, node_run_info in flow_result.node_run_infos.items():
+            assert node_run_info.status == Status.Completed
+            assert_contain_image_reference(node_run_info)
+
+    def test_executor_exec_line_with_chat_flow(self, dev_connections):
+        flow_folder = CHAT_FLOW_WITH_IMAGE
+        working_dir = get_yaml_working_dir(flow_folder)
+        os.chdir(working_dir)
+        storage = DefaultRunStorage(base_dir=working_dir, sub_dir=Path("./temp"))
+        executor = FlowExecutor.create(get_yaml_file(flow_folder), dev_connections, storage=storage)
+        flow_result = executor.exec_line({})
+        assert isinstance(flow_result.output, dict)
+        
assert_contain_image_object(flow_result.output) + assert flow_result.run_info.status == Status.Completed + assert_contain_image_reference(flow_result.run_info) + for _, node_run_info in flow_result.node_run_infos.items(): + assert node_run_info.status == Status.Completed + assert_contain_image_reference(node_run_info) + + @pytest.mark.parametrize( + "flow_folder, node_name, flow_inputs, dependency_nodes_outputs", get_test_cases_for_node_run() + ) + def test_executor_exec_node_with_image(self, flow_folder, node_name, flow_inputs, dependency_nodes_outputs, + dev_connections): + working_dir = get_yaml_working_dir(flow_folder) + os.chdir(working_dir) + run_info = FlowExecutor.load_and_exec_node( + get_yaml_file(flow_folder), + node_name, + flow_inputs=flow_inputs, + dependency_nodes_outputs=dependency_nodes_outputs, + connections=dev_connections, + output_sub_dir=("./temp"), + raise_ex=True, + ) + assert run_info.status == Status.Completed + assert_contain_image_reference(run_info) diff --git a/src/promptflow/tests/executor/unittests/_core/test_tools_manager.py b/src/promptflow/tests/executor/unittests/_core/test_tools_manager.py index 77b784ce0ef..bba04ce5444 100644 --- a/src/promptflow/tests/executor/unittests/_core/test_tools_manager.py +++ b/src/promptflow/tests/executor/unittests/_core/test_tools_manager.py @@ -191,9 +191,9 @@ def test_collect_package_tools_and_connections(self, install_custom_tool_pkg): package: test-custom-tools package_version: 0.0.2 configs: - api_url: "This is a fake api url." # String, The api url. + api_url: "This is a fake api url." # String type. The api url. secrets: # must-have - api_key: "to_replace_with_api_key" # String, The api key. + api_key: "to_replace_with_api_key" # String type. The api key. """ content = templates["my_tool_package.tools.my_tool_with_custom_strong_type_connection.MyCustomConnection"] diff --git a/src/promptflow/tests/executor/unittests/_utils/test_connection_utils.py b/src/promptflow/tests/executor/unittests/_utils/test_connection_utils.py index a17677f910f..3d6f76ed909 100644 --- a/src/promptflow/tests/executor/unittests/_utils/test_connection_utils.py +++ b/src/promptflow/tests/executor/unittests/_utils/test_connection_utils.py @@ -62,12 +62,12 @@ class TestConnectionUtils: ( MyCustomConnectionWithInvalidComments, [ - 'api_base: "to_replace_with_api_base" # String, The api base.\n', - 'api_key: "to_replace_with_api_key" # String, The api key.\n', + 'api_base: "to_replace_with_api_base" # String type. The api base.\n', + 'api_key: "to_replace_with_api_key" # String type. 
The api key.\n', ], ), (MyCustomConnectionMissingTypeComments, ['api_key: "to_replace_with_api_key" # The api key.']), - (MyCustomConnectionMissingParamComments, ['api_key: "to_replace_with_api_key" # String']), + (MyCustomConnectionMissingParamComments, ['api_key: "to_replace_with_api_key" # String type.']), ], ) def test_generate_custom_strong_type_connection_template_with_comments(self, cls, expected_str_in_template): diff --git a/src/promptflow/tests/executor/unittests/_utils/test_multimedia_utils.py b/src/promptflow/tests/executor/unittests/_utils/test_multimedia_utils.py new file mode 100644 index 00000000000..cbf39834f07 --- /dev/null +++ b/src/promptflow/tests/executor/unittests/_utils/test_multimedia_utils.py @@ -0,0 +1,127 @@ +import pytest +import re +from pathlib import Path +from unittest.mock import mock_open + +from promptflow._utils.multimedia_utils import ( + _create_image_from_file, + convert_multimedia_data_to_base64, + create_image, + load_multimedia_data, + persist_multimedia_data, +) +from promptflow.contracts._errors import InvalidImageInput +from promptflow.contracts.flow import FlowInputDefinition +from promptflow.contracts.tool import ValueType + +from ...utils import DATA_ROOT + +TEST_IMAGE_PATH = DATA_ROOT / "test_image.jpg" + + +@pytest.mark.unittest +class TestMultimediaUtils: + def test_create_image_with_dict(self, mocker): + ## From path + image_dict = {"data:image/jpg;path": TEST_IMAGE_PATH} + image_from_path = create_image(image_dict) + assert image_from_path._mime_type == "image/jpg" + + ## From base64 + image_dict = {"data:image/jpg;base64": image_from_path.to_base64()} + image_from_base64 = create_image(image_dict) + assert str(image_from_path) == str(image_from_base64) + assert image_from_base64._mime_type == "image/jpg" + + ## From url + mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200)) + image_dict = {"data:image/jpg;url": ""} + image_from_url = create_image(image_dict) + assert str(image_from_path) == str(image_from_url) + assert image_from_url._mime_type == "image/jpg" + + mocker.patch("requests.get", return_value=mocker.Mock(content=None, status_code=404)) + with pytest.raises(InvalidImageInput) as ex: + create_image(image_dict) + assert "Error while fetching image from URL" in ex.value.message_format + + def test_create_image_with_string(self, mocker): + ## From path + image_from_path = create_image(str(TEST_IMAGE_PATH)) + assert image_from_path._mime_type == "image/jpg" + + # From base64 + image_from_base64 = create_image(image_from_path.to_base64()) + assert str(image_from_path) == str(image_from_base64) + assert image_from_base64._mime_type in ["image/jpg", "image/jpeg"] + + ## From url + mocker.patch("promptflow._utils.multimedia_utils._is_url", return_value=True) + mocker.patch("promptflow._utils.multimedia_utils._is_base64", return_value=False) + mocker.patch("requests.get", return_value=mocker.Mock(content=image_from_path, status_code=200)) + image_from_url = create_image("") + assert str(image_from_path) == str(image_from_url) + assert image_from_url._mime_type in ["image/jpg", "image/jpeg"] + + ## From image + image_from_image = create_image(image_from_path) + assert str(image_from_path) == str(image_from_image) + + def test_create_image_with_invalid_cases(self): + # Test invalid input type + with pytest.raises(InvalidImageInput) as ex: + create_image(0) + assert "Unsupported image input type" in ex.value.message_format + + # Test invalid image dict + with pytest.raises(InvalidImageInput) as 
ex:
+            invalid_image_dict = {"invalid_image": "invalid_image"}
+            create_image(invalid_image_dict)
+        assert "Invalid image input format" in ex.value.message_format
+
+    def test_persist_multimedia_data(self, mocker):
+        image = _create_image_from_file(TEST_IMAGE_PATH)
+        mocker.patch('builtins.open', mock_open())
+        data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
+        persisted_data = persist_multimedia_data(data, base_dir=Path(__file__).parent)
+        file_name = re.compile(r"^[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{12}.jpg$")
+        assert re.match(file_name, persisted_data["image"]["data:image/jpg;path"])
+        assert re.match(file_name, persisted_data["images"][0]["data:image/jpg;path"])
+        assert re.match(file_name, persisted_data["images"][1]["data:image/jpg;path"])
+
+    def test_convert_multimedia_data_to_base64(self):
+        image = _create_image_from_file(TEST_IMAGE_PATH)
+        data = {"image": image, "images": [image, image, "other_data"], "other_data": "other_data"}
+        base64_data = convert_multimedia_data_to_base64(data)
+        assert base64_data == {
+            "image": image.to_base64(),
+            "images": [image.to_base64(), image.to_base64(), "other_data"],
+            "other_data": "other_data",
+        }
+
+        base64_data = convert_multimedia_data_to_base64(data, with_type=True)
+        prefix = f"data:{image._mime_type};base64,"
+        assert base64_data == {
+            "image": prefix + image.to_base64(),
+            "images": [prefix + image.to_base64(), prefix + image.to_base64(), "other_data"],
+            "other_data": "other_data",
+        }
+
+    def test_load_multimedia_data(self):
+        inputs = {
+            "image": FlowInputDefinition(type=ValueType.IMAGE),
+            "images": FlowInputDefinition(type=ValueType.LIST),
+            "object": FlowInputDefinition(type=ValueType.OBJECT),
+        }
+        line_inputs = {
+            "image": {"data:image/jpg;path": str(TEST_IMAGE_PATH)},
+            "images": [{"data:image/jpg;path": str(TEST_IMAGE_PATH)}, {"data:image/jpg;path": str(TEST_IMAGE_PATH)}],
+            "object": {"image": {"data:image/jpg;path": str(TEST_IMAGE_PATH)}, "other_data": "other_data"}
+        }
+        updated_inputs = load_multimedia_data(inputs, line_inputs)
+        image = _create_image_from_file(TEST_IMAGE_PATH)
+        assert updated_inputs == {
+            "image": image,
+            "images": [image, image],
+            "object": {"image": image, "other_data": "other_data"}
+        }
diff --git a/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py b/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py
index e453926c480..1ea32766fb3 100644
--- a/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py
+++ b/src/promptflow/tests/executor/unittests/executor/test_tool_resolver.py
@@ -311,15 +311,19 @@ def mock_llm_api_func(prompt: PromptTemplate, **kwargs):
         assert re.match(pattern, prompt)
 
     def test_resolve_script_node(self, mocker):
-        def mock_python_func(conn: AzureOpenAIConnection, prompt: PromptTemplate, **kwargs):
+        def mock_python_func(prompt: PromptTemplate, **kwargs):
             from promptflow.tools.template_rendering import render_template_jinja2
 
-            assert isinstance(conn, AzureOpenAIConnection)
             return render_template_jinja2(prompt, **kwargs)
 
         tool_loader = ToolLoader(working_dir=None)
         tool = Tool(name="mock", type=ToolType.PYTHON, inputs={"conn": InputDefinition(type=["AzureOpenAIConnection"])})
-        mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, mock_python_func, tool))
+        mocker.patch.object(tool_loader, "load_tool_for_script_node", return_value=(None, tool))
+
+        mocker.patch(
+            
"promptflow._core.tools_manager.BuiltinsManager._load_tool_from_module", + return_value=(mock_python_func, {"conn": AzureOpenAIConnection}), + ) connections = {"conn_name": {"type": "AzureOpenAIConnection", "value": {"api_key": "mock", "api_base": "mock"}}} tool_resolver = ToolResolver(working_dir=None, connections=connections) @@ -337,6 +341,7 @@ def mock_python_func(conn: AzureOpenAIConnection, prompt: PromptTemplate, **kwar provider="mock", ) resolved_tool = tool_resolver._resolve_script_node(node, convert_input_types=True) + assert len(resolved_tool.node.inputs) == 2 kwargs = {k: v.value for k, v in resolved_tool.node.inputs.items()} assert resolved_tool.callable(**kwargs) == "Hello World!" diff --git a/src/promptflow/tests/sdk_cli_azure_test/conftest.py b/src/promptflow/tests/sdk_cli_azure_test/conftest.py index fd71557f047..9c3802b00eb 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/conftest.py +++ b/src/promptflow/tests/sdk_cli_azure_test/conftest.py @@ -1,9 +1,16 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- + +import logging import os +import uuid +from concurrent.futures import ThreadPoolExecutor from pathlib import Path +from typing import Callable +from unittest.mock import patch +import jwt import pytest from azure.ai.ml import MLClient from azure.ai.ml.constants._common import AZUREML_RESOURCE_PROVIDER, RESOURCE_ID_FORMAT @@ -16,11 +23,27 @@ from promptflow.azure import PFClient from ._azure_utils import get_cred +from .recording_utilities import ( + PFAzureIntegrationTestRecording, + get_pf_client_for_playback, + is_live, + is_live_and_not_recording, +) FLOWS_DIR = "./tests/test_configs/flows" DATAS_DIR = "./tests/test_configs/datas" +@pytest.fixture +def tenant_id() -> str: + if not is_live(): + return "" + credential = get_cred() + access_token = credential.get_token("https://management.azure.com/.default") + decoded_token = jwt.decode(access_token.token, options={"verify_signature": False}) + return decoded_token["tid"] + + @pytest.fixture def ml_client( default_subscription_id: str, @@ -38,45 +61,51 @@ def ml_client( ) -@pytest.fixture() +@pytest.fixture def remote_client() -> PFClient: - # enable telemetry for CI - with environment_variable_overwrite(TELEMETRY_ENABLED, "true"): - yield PFClient( - credential=get_cred(), - subscription_id="96aede12-2f73-41cb-b983-6d11a904839b", - resource_group_name="promptflow", - workspace_name="promptflow-eastus", - ) + if not is_live(): + yield get_pf_client_for_playback() + else: + # enable telemetry for CI + with environment_variable_overwrite(TELEMETRY_ENABLED, "true"): + yield PFClient( + credential=get_cred(), + subscription_id="96aede12-2f73-41cb-b983-6d11a904839b", + resource_group_name="promptflow", + workspace_name="promptflow-eastus", + ) @pytest.fixture() -def remote_workspace_resource_id(): +def remote_workspace_resource_id() -> str: return "azureml:" + RESOURCE_ID_FORMAT.format( "96aede12-2f73-41cb-b983-6d11a904839b", "promptflow", AZUREML_RESOURCE_PROVIDER, "promptflow-eastus" ) -@pytest.fixture() +@pytest.fixture def remote_client_int() -> PFClient: - # enable telemetry for CI - with environment_variable_overwrite(TELEMETRY_ENABLED, "true"): - client = MLClient( - credential=get_cred(), - subscription_id="96aede12-2f73-41cb-b983-6d11a904839b", - resource_group_name="promptflow", - workspace_name="promptflow-int", - ) - yield PFClient(ml_client=client) + if not is_live(): + yield 
get_pf_client_for_playback()
+    else:
+        # enable telemetry for non-playback CI
+        with environment_variable_overwrite(TELEMETRY_ENABLED, "true"):
+            client = MLClient(
+                credential=get_cred(),
+                subscription_id="96aede12-2f73-41cb-b983-6d11a904839b",
+                resource_group_name="promptflow",
+                workspace_name="promptflow-int",
+            )
+            yield PFClient(ml_client=client)
 
 
 @pytest.fixture()
-def pf(remote_client) -> PFClient:
+def pf(remote_client: PFClient) -> PFClient:
     yield remote_client
 
 
 @pytest.fixture
-def remote_web_classification_data(remote_client):
+def remote_web_classification_data(remote_client: PFClient) -> Data:
     data_name, data_version = "webClassification1", "1"
     try:
         return remote_client.ml_client.data.get(name=data_name, version=data_version)
@@ -87,12 +116,12 @@
 
 
 @pytest.fixture
-def runtime():
+def runtime() -> str:
     return "demo-mir"
 
 
 @pytest.fixture
-def runtime_int():
+def runtime_int() -> str:
     return "daily-image-mir"
 
 
@@ -166,3 +195,64 @@ def flow_serving_client_remote_connection(mocker: MockerFixture, remote_workspac
         }
     )
     return app.test_client()
+
+
+@pytest.fixture(scope="function")
+def vcr_recording(request: pytest.FixtureRequest, tenant_id: str) -> PFAzureIntegrationTestRecording:
+    recording = PFAzureIntegrationTestRecording.from_test_case(
+        test_class=request.cls,
+        test_func_name=request.node.name,
+        tenant_id=tenant_id,
+    )
+    if not is_live_and_not_recording():
+        recording.enter_vcr()
+        request.addfinalizer(recording.exit_vcr)
+    yield recording
+
+
+@pytest.fixture
+def randstr(vcr_recording: PFAzureIntegrationTestRecording) -> Callable[[str], str]:
+    """Return a random string (a UUID in live mode), recorded so that playback reuses the same value."""
+
+    def generate_random_string(variable_name: str) -> str:
+        random_string = str(uuid.uuid4())
+        return vcr_recording.get_or_record_variable(variable_name, random_string)
+
+    return generate_random_string
+
+
+# we expect this fixture to take effect only when running live tests without recording;
+# when recording, we don't want to record any application insights secrets,
+# and when replaying, we don't need it either
+@pytest.fixture(autouse=not is_live_and_not_recording())
+def mock_appinsights_log_handler(mocker: MockerFixture) -> None:
+    dummy_logger = logging.getLogger("dummy")
+    mocker.patch("promptflow._telemetry.telemetry.get_telemetry_logger", return_value=dummy_logger)
+    return
+
+
+@pytest.fixture
+def single_worker_thread_pool() -> None:
+    def single_worker_thread_pool_executor(*args, **kwargs):
+        return ThreadPoolExecutor(max_workers=1)
+
+    with patch(
+        "promptflow.azure.operations._run_operations.ThreadPoolExecutor",
+        new=single_worker_thread_pool_executor,
+    ):
+        yield
+
+
+@pytest.fixture
+def mock_set_headers_with_user_aml_token(mocker: MockerFixture) -> None:
+    mocker.patch("promptflow.azure._restclient.flow_service_caller.FlowServiceCaller._set_headers_with_user_aml_token")
+    return
+
+
+@pytest.fixture
+def mock_get_azure_pf_client(mocker: MockerFixture, remote_client: PFClient) -> None:
+    mocker.patch(
+        "promptflow._cli._pf_azure._run._get_azure_pf_client",
+        return_value=remote_client,
+    )
+    yield
diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_arm_connection_operations.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_arm_connection_operations.py
index 2c0d9a3ac35..1d83236b63e 100644
--- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_arm_connection_operations.py
+++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_arm_connection_operations.py
@@ -1,23 +1,25 @@
 # 
--------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- + import pytest +from promptflow.azure import PFClient +from promptflow.azure.operations._arm_connection_operations import ArmConnectionOperations + from .._azure_utils import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD @pytest.fixture -def connection_ops(ml_client): - from promptflow.azure import PFClient - - pf = PFClient(ml_client=ml_client) - yield pf._arm_connections +def connection_ops(pf: PFClient) -> ArmConnectionOperations: + return pf._arm_connections @pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD) @pytest.mark.e2etest +@pytest.mark.usefixtures("vcr_recording") class TestArmConnectionOperations: - def test_get_connection(self, connection_ops): + def test_get_connection(self, connection_ops: ArmConnectionOperations): # Note: Secrets will be returned by arm api result = connection_ops.get(name="azure_open_ai_connection") assert ( diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli.py index 33bc9501ef7..a23612fec85 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli.py +++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli.py @@ -7,6 +7,8 @@ from promptflow._cli._pf.entry import main +from ..recording_utilities import is_live + FLOWS_DIR = "./tests/test_configs/flows" RUNS_DIR = "./tests/test_configs/runs" CONNECTIONS_DIR = "./tests/test_configs/connections" @@ -31,6 +33,7 @@ def run_pf_command(*args, cwd=None): os.chdir(origin_cwd) +@pytest.mark.skipif(condition=not is_live(), reason="CLI tests, only run in live mode.") @pytest.mark.cli_test @pytest.mark.e2etest class TestCli: diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli_with_azure.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli_with_azure.py index 37a8d4c8002..955836befa8 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli_with_azure.py +++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_cli_with_azure.py @@ -1,28 +1,34 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + import os import sys import uuid +from typing import Callable import pytest +from azure.ai.ml.entities import Data from promptflow._cli._pf_azure.entry import main from promptflow._sdk.entities import Run +from promptflow.azure import PFClient from .._azure_utils import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD FLOWS_DIR = "./tests/test_configs/flows" DATAS_DIR = "./tests/test_configs/datas" +RUNS_DIR = "./tests/test_configs/runs" # TODO: move this to a shared utility module -def run_pf_command(*args, pf, runtime, cwd=None): +def run_pf_command(*args, pf, runtime=None, cwd=None): origin_argv, origin_cwd = sys.argv, os.path.abspath(os.curdir) try: sys.argv = ( ["pfazure"] + list(args) + [ - "--runtime", - runtime, "--subscription", pf._ml_client.subscription_id, "--resource-group", @@ -31,6 +37,8 @@ def run_pf_command(*args, pf, runtime, cwd=None): pf._ml_client.workspace_name, ] ) + if runtime: + sys.argv += ["--runtime", runtime] if cwd: os.chdir(cwd) main() @@ -41,9 +49,15 @@ def run_pf_command(*args, pf, runtime, cwd=None): @pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD) @pytest.mark.e2etest +@pytest.mark.usefixtures( + "mock_get_azure_pf_client", + "mock_set_headers_with_user_aml_token", + "single_worker_thread_pool", + "vcr_recording", +) class TestCliWithAzure: - def test_basic_flow_run_bulk_without_env(self, pf, runtime) -> None: - name = str(uuid.uuid4()) + def test_basic_flow_run_bulk_without_env(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]) -> None: + name = randstr("name") run_pf_command( "run", "create", @@ -77,9 +91,11 @@ def test_basic_flow_with_package_tool_with_custom_strong_type_connection(self, p run = pf.runs.get(run=name) assert isinstance(run, Run) - def test_run_with_remote_data(self, pf, runtime, remote_web_classification_data, temp_output_dir: str): + def test_run_with_remote_data( + self, pf: PFClient, runtime: str, remote_web_classification_data: Data, randstr: Callable[[str], str] + ) -> None: # run with arm id - name = str(uuid.uuid4()) + name = randstr("name1") run_pf_command( "run", "create", @@ -97,7 +113,7 @@ def test_run_with_remote_data(self, pf, runtime, remote_web_classification_data, assert isinstance(run, Run) # run with name version - name = str(uuid.uuid4()) + name = randstr("name2") run_pf_command( "run", "create", @@ -113,3 +129,20 @@ def test_run_with_remote_data(self, pf, runtime, remote_web_classification_data, ) run = pf.runs.get(run=name) assert isinstance(run, Run) + + def test_run_file_with_set(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]) -> None: + name = randstr("name") + run_pf_command( + "run", + "create", + "--file", + f"{RUNS_DIR}/run_with_env.yaml", + "--set", + f"runtime={runtime}", + "--name", + name, + pf=pf, + ) + run = pf.runs.get(run=name) + assert isinstance(run, Run) + assert run.properties["azureml.promptflow.runtime_name"] == runtime diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_connection_operations.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_connection_operations.py index bbbdba80d56..d3834d2a291 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_connection_operations.py +++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_connection_operations.py @@ -1,11 +1,14 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- + import pydash import pytest from promptflow._sdk.entities._connection import _Connection +from promptflow.azure import PFClient from promptflow.azure._restclient.flow_service_caller import FlowRequestException +from promptflow.azure.operations._connection_operations import ConnectionOperations from promptflow.connections import AzureOpenAIConnection, CustomConnection from promptflow.contracts.types import Secret @@ -13,15 +16,13 @@ @pytest.fixture -def connection_ops(ml_client): - from promptflow.azure import PFClient - - pf = PFClient(ml_client=ml_client) - yield pf._connections +def connection_ops(pf: PFClient) -> ConnectionOperations: + return pf._connections @pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD) @pytest.mark.e2etest +@pytest.mark.usefixtures("vcr_recording") class TestConnectionOperations: @pytest.mark.skip(reason="Skip to avoid flooded connections in workspace.") def test_connection_get_create_delete(self, connection_ops): @@ -68,7 +69,7 @@ def test_custom_connection_create(self, connection_ops): # soft delete connection_ops.delete(name=connection.name) - def test_list_connection_spec(self, connection_ops): + def test_list_connection_spec(self, connection_ops: ConnectionOperations): result = {v.connection_type: v._to_dict() for v in connection_ops.list_connection_specs()} # Assert custom keys type assert "Custom" in result @@ -105,7 +106,7 @@ def test_list_connection_spec(self, connection_ops): for spec in expected_config_specs: assert spec in result["AzureOpenAI"]["config_specs"] - def test_get_connection(self, connection_ops): + def test_get_connection(self, connection_ops: ConnectionOperations): # Note: No secrets will be returned by MT api result = connection_ops.get(name="azure_open_ai_connection") assert ( diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py index bebdaea63bb..c2bbfcc8378 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py +++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_in_azure_ml.py @@ -10,6 +10,7 @@ from promptflow.connections import AzureOpenAIConnection from .._azure_utils import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD +from ..recording_utilities import is_live PROMOTFLOW_ROOT = Path(__file__) / "../../../.." 
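A note on the is_live() gate used by the skipif markers added below and in the other test modules in this diff: the helper comes from the new recording_utilities package. The following is a minimal sketch of how such a pair of helpers could be implemented, assuming live mode and recording are toggled by the PROMPT_FLOW_TEST_RUN_LIVE and PROMPT_FLOW_SKIP_LIVE_RECORDING environment variables; the actual utils.py in this change may differ.

import os


def is_live() -> bool:
    # Assumption: live mode is toggled by an environment variable set in CI.
    return os.getenv("PROMPT_FLOW_TEST_RUN_LIVE", "false").lower() == "true"


def is_live_and_not_recording() -> bool:
    # Assumption: a companion flag suppresses cassette recording during live runs.
    return is_live() and os.getenv("PROMPT_FLOW_SKIP_LIVE_RECORDING", "false").lower() == "true"

With these two switches the suite effectively runs in three modes: replay (default), record (live with recording), and live-only (live without recording).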
@@ -64,6 +65,10 @@ def update_saved_spec(component: Component, saved_spec_path: str): saved_spec_path.write_text(yaml_text) +@pytest.mark.skipif( + condition=not is_live(), + reason="flow in pipeline tests require secrets config file, only run in live mode.", +) @pytest.mark.usefixtures("use_secrets_config_file") @pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD) @pytest.mark.e2etest diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_serve.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_serve.py index 6ede7a77995..9e07d1a5691 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_serve.py +++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_flow_serve.py @@ -2,7 +2,10 @@ import pytest +from ..recording_utilities import is_live + +@pytest.mark.skipif(condition=not is_live(), reason="serving tests, only run in live mode.") @pytest.mark.usefixtures("flow_serving_client_remote_connection") @pytest.mark.e2etest def test_serving_api(flow_serving_client_remote_connection): diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_run_operations.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_run_operations.py index 5ab2db0e187..987aa5b070d 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_run_operations.py +++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_run_operations.py @@ -1,14 +1,16 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # --------------------------------------------------------- + import json import shutil -import uuid from pathlib import Path from tempfile import TemporaryDirectory +from typing import Callable from unittest.mock import MagicMock, patch import pytest +from azure.ai.ml.entities import Data from promptflow._sdk._constants import RunStatus from promptflow._sdk._errors import InvalidRunError, RunNotFoundError @@ -20,6 +22,7 @@ from promptflow.azure.operations import RunOperations from .._azure_utils import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD +from ..recording_utilities import is_live PROMOTFLOW_ROOT = Path(__file__) / "../../../.." @@ -34,28 +37,35 @@ # TODO(2528577): we should run these test with recording mode. 
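For context on the vcr_recording fixture pulled in by the usefixtures marker below: per the conftest.py change above, each test enters a vcr.py cassette before running and closes it afterwards. A simplified sketch of that lifecycle, not the verbatim implementation (enter_vcr/exit_vcr wrap vcrpy's cassette context manager, and the class/test names here are illustrative):

from recording_utilities import PFAzureIntegrationTestRecording

recording = PFAzureIntegrationTestRecording.from_test_case(
    test_class=TestFlowRun,
    test_func_name="test_run_bulk",
    tenant_id="",  # the tenant_id fixture returns "" outside live mode
)
recording.enter_vcr()  # start playback or recording of HTTP traffic
try:
    pass  # test body runs here; randstr supplies replay-stable run names
finally:
    recording.exit_vcr()  # registered via request.addfinalizer in the fixture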
@pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD) @pytest.mark.e2etest +@pytest.mark.usefixtures( + "mock_set_headers_with_user_aml_token", + "single_worker_thread_pool", + "vcr_recording", +) class TestFlowRun: - def test_run_bulk(self, remote_client, pf, runtime): + def test_run_bulk(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): + name = randstr("name") run = pf.run( flow=f"{FLOWS_DIR}/web_classification", data=f"{DATAS_DIR}/webClassification1.jsonl", column_mapping={"url": "${data.url}"}, variant="${summarize_text_content.variant_0}", runtime=runtime, + name=name, ) assert isinstance(run, Run) - assert run.name.startswith("web_classification") + assert run.name == name - def test_run_bulk_from_yaml(self, remote_client, pf, runtime): - run_id = str(uuid.uuid4()) + def test_run_bulk_from_yaml(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): + run_id = randstr("run_id") run = load_run( source=f"{RUNS_DIR}/sample_bulk_run_cloud.yaml", params_override=[{"name": run_id, "runtime": runtime}], ) - run = remote_client.runs.create_or_update(run=run) + run = pf.runs.create_or_update(run=run) assert isinstance(run, Run) - def test_basic_evaluation(self, pf, runtime): + def test_basic_evaluation(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): data_path = f"{DATAS_DIR}/webClassification3.jsonl" run = pf.run( @@ -64,6 +74,7 @@ def test_basic_evaluation(self, pf, runtime): column_mapping={"url": "${data.url}"}, variant="${summarize_text_content.variant_0}", runtime=runtime, + name=randstr("batch_run_name"), ) assert isinstance(run, Run) run = pf.runs.stream(run=run.name) @@ -75,6 +86,7 @@ def test_basic_evaluation(self, pf, runtime): run=run, column_mapping={"groundtruth": "${data.answer}", "prediction": "${run.outputs.category}"}, runtime=runtime, + name=randstr("eval_run_name"), ) assert isinstance(eval_run, Run) pf.runs.stream(run=eval_run.name) @@ -89,11 +101,12 @@ def test_basic_evaluation(self, pf, runtime): "prediction": "${run.outputs.category}", }, runtime=runtime, + name=randstr("eval_run_name_1"), ) assert isinstance(eval_run, Run) pf.runs.stream(run=eval_run.name) - def test_run_with_connection_overwrite(self, remote_client, pf, runtime): + def test_run_with_connection_overwrite(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): run = pf.run( flow=f"{FLOWS_DIR}/web_classification", data=f"{DATAS_DIR}/webClassification1.jsonl", @@ -101,29 +114,34 @@ def test_run_with_connection_overwrite(self, remote_client, pf, runtime): variant="${summarize_text_content.variant_0}", connections={"classify_with_llm": {"connection": "azure_open_ai", "model": "gpt-3.5-turbo"}}, runtime=runtime, + name=randstr("name"), ) assert isinstance(run, Run) - def test_run_with_env_overwrite(self, remote_client, pf, runtime): + def test_run_with_env_overwrite(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): run = load_run( source=f"{RUNS_DIR}/run_with_env.yaml", params_override=[{"runtime": runtime}], ) - run = remote_client.runs.create_or_update(run=run) + run.name = randstr("name") + run = pf.runs.create_or_update(run=run) assert isinstance(run, Run) - def test_run_display_name_with_macro(self, pf, runtime): + def test_run_display_name_with_macro(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): run = load_run( source=f"{RUNS_DIR}/run_with_env.yaml", params_override=[{"runtime": runtime}], ) + run.name = randstr("name") run.display_name = "my_display_name_${variant_id}_${timestamp}" run 
= pf.runs.create_or_update(run=run)
         assert run.display_name.startswith("my_display_name_variant_0_")
         assert "${timestamp}" not in run.display_name
         assert isinstance(run, Run)
 
-    def test_run_with_remote_data(self, remote_client, pf, runtime, remote_web_classification_data):
+    def test_run_with_remote_data(
+        self, pf: PFClient, runtime: str, remote_web_classification_data: Data, randstr: Callable[[str], str]
+    ):
         # run with arm id
         run = pf.run(
             flow=f"{FLOWS_DIR}/web_classification",
@@ -131,6 +149,7 @@ def test_run_with_remote_data(self, remote_client, pf, runtime, remote_web_class
             column_mapping={"url": "${data.url}"},
             variant="${summarize_text_content.variant_0}",
             runtime=runtime,
+            name=randstr("name1"),
         )
         assert isinstance(run, Run)
         # run with name version
@@ -140,10 +159,12 @@ def test_run_with_remote_data(self, remote_client, pf, runtime, remote_web_class
             column_mapping={"url": "${data.url}"},
             variant="${summarize_text_content.variant_0}",
             runtime=runtime,
+            name=randstr("name2"),
         )
         assert isinstance(run, Run)
 
-    def test_run_bulk_not_exist(self, pf, runtime):
+    # TODO: confirm whether this test is an end-to-end test
+    def test_run_bulk_not_exist(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]):
         test_data = f"{DATAS_DIR}/webClassification1.jsonl"
         with pytest.raises(FileNotFoundError) as e:
             pf.run(
@@ -153,19 +174,29 @@ def test_run_bulk_not_exist(self, pf, runtime):
                 column_mapping={"url": "${data.url}"},
                 variant="${summarize_text_content.variant_0}",
                 runtime=runtime,
+                name=randstr("name"),
             )
         assert "does not exist" in str(e.value)
 
-    def test_list_runs(self, remote_client):
-        runs = remote_client.runs.list(max_results=10)
+    def test_list_runs(self, pf: PFClient):
+        runs = pf.runs.list(max_results=10)
         for run in runs:
             print(json.dumps(run._to_dict(), indent=4))
         assert len(runs) == 10
 
-    def test_show_run(self, remote_client):
-        run = remote_client.runs.get(run="classification_accuracy_eval_default_20230808_153241_422491")
+    def test_show_run(self, pf: PFClient, tenant_id: str):
+        run = pf.runs.get(run="classification_accuracy_eval_default_20230808_153241_422491")
         run_dict = run._to_dict()
         print(json.dumps(run_dict, indent=4))
+
+        subscription_id = pf.ml_client.subscription_id
+        resource_group_name = pf.ml_client.resource_group_name
+        workspace_name = pf.ml_client.workspace_name
+        # found this missed sanitization during test; use this as a workaround
+        miss_sanitization = "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5" if tenant_id else workspace_name
+        if not tenant_id:
+            tenant_id = "00000000-0000-0000-0000-000000000000"
+
         assert run_dict == {
             "name": "classification_accuracy_eval_default_20230808_153241_422491",
             "created_on": "2023-08-08T07:32:52.761030+00:00",
@@ -186,52 +217,52 @@
                 "userPuId": "10032000324F7449",
                 "userIdp": None,
                 "userAltSecId": None,
-                "userIss": "https://sts.windows.net/72f988bf-86f1-41af-91ab-2d7cd011db47/",
-                "userTenantId": "72f988bf-86f1-41af-91ab-2d7cd011db47",
+                "userIss": f"https://sts.windows.net/{tenant_id}/",
+                "userTenantId": tenant_id,
                 "userName": "Honglin Du",
                 "upn": None,
             },
             "start_time": "2023-08-08T07:32:56.637761+00:00",
             "end_time": "2023-08-08T07:33:07.853922+00:00",
             "duration": "00:00:11.2161606",
-            "portal_url": "https://ml.azure.com/prompts/flow/bulkrun/run/classification_accuracy_eval_default_20230808_153241_422491/details?wsid=/subscriptions/96aede12-2f73-41cb-b983-6d11a904839b/resourceGroups/promptflow/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus",  # noqa: E501
+            
"portal_url": f"https://ml.azure.com/prompts/flow/bulkrun/run/classification_accuracy_eval_default_20230808_153241_422491/details?wsid=/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}", # noqa: E501 "data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/312cca2af474e5f895013392b6b38f45/data.jsonl", # noqa: E501 - "data_portal_url": "https://ml.azure.com/data/datastore/workspaceblobstore/edit?wsid=/subscriptions/96aede12-2f73-41cb-b983-6d11a904839b/resourceGroups/promptflow/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus&activeFilePath=LocalUpload/312cca2af474e5f895013392b6b38f45/data.jsonl#browseTab", # noqa: E501 - "output": "azureml://locations/eastus/workspaces/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/data/azureml_classification_accuracy_eval_default_20230808_153241_422491_output_data_flow_outputs/versions/1", # noqa: E501 - "output_portal_url": "https://ml.azure.com/data/azureml_classification_accuracy_eval_default_20230808_153241_422491_output_data_flow_outputs/1/details?wsid=/subscriptions/96aede12-2f73-41cb-b983-6d11a904839b/resourceGroups/promptflow/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus", # noqa: E501 + "data_portal_url": f"https://ml.azure.com/data/datastore/workspaceblobstore/edit?wsid=/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}&activeFilePath=LocalUpload/312cca2af474e5f895013392b6b38f45/data.jsonl#browseTab", # noqa: E501 + "output": f"azureml://locations/eastus/workspaces/{miss_sanitization}/data/azureml_classification_accuracy_eval_default_20230808_153241_422491_output_data_flow_outputs/versions/1", # noqa: E501 + "output_portal_url": f"https://ml.azure.com/data/azureml_classification_accuracy_eval_default_20230808_153241_422491_output_data_flow_outputs/1/details?wsid=/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}", # noqa: E501 "run": "web_classification_default_20230804_143634_056856", - "input_run_portal_url": "https://ml.azure.com/prompts/flow/bulkrun/run/web_classification_default_20230804_143634_056856/details?wsid=/subscriptions/96aede12-2f73-41cb-b983-6d11a904839b/resourceGroups/promptflow/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus", # noqa: E501 + "input_run_portal_url": f"https://ml.azure.com/prompts/flow/bulkrun/run/web_classification_default_20230804_143634_056856/details?wsid=/subscriptions/{subscription_id}/resourceGroups/{resource_group_name}/providers/Microsoft.MachineLearningServices/workspaces/{workspace_name}", # noqa: E501 } - def test_show_run_details(self, remote_client): + def test_show_run_details(self, pf: PFClient): run = "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74" # get first 20 results - details = remote_client.get_details(run=run, max_results=20) + details = pf.get_details(run=run, max_results=20) assert details.shape[0] == 20 # get first 1000 results while it only has 40 - details = remote_client.get_details(run=run, max_results=1000) + details = pf.get_details(run=run, max_results=1000) assert details.shape[0] == 40 # get all results - details = remote_client.get_details( + details = pf.get_details( run=run, all_results=True, ) assert details.shape[0] == 40 # get all results even if max_results is set to 10 - details = remote_client.get_details( + details = pf.get_details( 
run=run,
             max_results=10,
             all_results=True,
         )
         assert details.shape[0] == 40
 
-    def test_show_metrics(self, remote_client):
-        metrics = remote_client.runs.get_metrics(
+    def test_show_metrics(self, pf: PFClient):
+        metrics = pf.runs.get_metrics(
             run="4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74",
         )
         print(json.dumps(metrics, indent=4))
@@ -242,24 +273,29 @@
             "gpt_relevance_pass_rate(%).variant_0": 0.0,
         }
 
-    def test_stream_run_logs(self, remote_client, pf):
+    def test_stream_invalid_run_logs(self, pf: PFClient, randstr: Callable[[str], str]):
         # test get invalid run name
-        non_exist_run = str(uuid.uuid4())
+        non_exist_run = randstr("non_exist_run")
         with pytest.raises(RunNotFoundError, match=f"Run {non_exist_run!r} not found"):
             pf.runs.stream(run=non_exist_run)
 
-        run = remote_client.runs.stream(run="4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74")
+    def test_stream_run_logs(self, pf: PFClient):
+        run = pf.runs.stream(run="4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74")
         assert run.status == RunStatus.COMPLETED
 
-    def test_stream_failed_run_logs(self, remote_client, pf, capfd):
-        run = remote_client.runs.stream(run="3dfd077a-f071-443e-9c4e-d41531710950")
-        out, err = capfd.readouterr()
+    def test_stream_failed_run_logs(self, pf: PFClient, capfd):
+        run = pf.runs.stream(run="3dfd077a-f071-443e-9c4e-d41531710950")
+        out, _ = capfd.readouterr()
         print(out)
         assert run.status == "Failed"
         # error info will store in run dict
         assert "error" in run._to_dict()
 
-    def test_archive_and_restore_run(self, remote_client):
+    @pytest.mark.skipif(
+        condition=not is_live(),
+        reason="cannot differentiate the two requests to run history in replay mode.",
+    )
+    def test_archive_and_restore_run(self, pf: PFClient):
         from promptflow._sdk._constants import RunHistoryKeys
 
         run_meta_data = RunHistoryKeys.RunMetaData
@@ -268,24 +304,24 @@
         run_id = "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74"
 
         # test archive
-        remote_client.runs.archive(run=run_id)
-        run_data = remote_client.runs._get_run_from_run_history(run_id, original_form=True)[run_meta_data]
+        pf.runs.archive(run=run_id)
+        run_data = pf.runs._get_run_from_run_history(run_id, original_form=True)[run_meta_data]
         assert run_data[hidden] is True
 
         # test restore
-        remote_client.runs.restore(run=run_id)
-        run_data = remote_client.runs._get_run_from_run_history(run_id, original_form=True)[run_meta_data]
+        pf.runs.restore(run=run_id)
+        run_data = pf.runs._get_run_from_run_history(run_id, original_form=True)[run_meta_data]
         assert run_data[hidden] is False
 
-    def test_update_run(self, remote_client):
+    def test_update_run(self, pf: PFClient, randstr: Callable[[str], str]):
         run_id = "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74"
-        test_mark = str(uuid.uuid4())
+        test_mark = randstr("test_mark")
         new_display_name = f"test_display_name_{test_mark}"
         new_description = f"test_description_{test_mark}"
         new_tags = {"test_tag": test_mark}
 
-        run = remote_client.runs.update(
+        run = pf.runs.update(
             run=run_id,
             display_name=new_display_name,
             description=new_description,
@@ -296,7 +332,7 @@
         assert run.tags["test_tag"] == test_mark
 
         # test wrong type of parameters won't raise error, just log warnings and got ignored
-        run = remote_client.runs.update(
+        run = pf.runs.update(
             run=run_id,
             tags={"test_tag": {"a": 1}},
         )
@@ -304,15 +340,19 @@
         assert run.description == new_description
         assert run.tags["test_tag"] == test_mark
 
-    def test_run_with_additional_includes(self, 
remote_client, pf, runtime): + @pytest.mark.skipif( + condition=not is_live(), reason="request uri contains temp folder name, need some time to sanitize." + ) + def test_run_with_additional_includes(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): run = pf.run( flow=f"{FLOWS_DIR}/web_classification_with_additional_include", data=f"{DATAS_DIR}/webClassification1.jsonl", inputs_mapping={"url": "${data.url}"}, variant="${summarize_text_content.variant_0}", runtime=runtime, + name=randstr("name"), ) - run = remote_client.runs.stream(run=run.name) + run = pf.runs.stream(run=run.name) assert run.status == RunStatus.COMPLETED # Test additional includes don't exist @@ -323,6 +363,7 @@ def test_run_with_additional_includes(self, remote_client, pf, runtime): inputs_mapping={"url": "${data.url}"}, variant="${summarize_text_content.variant_0}", runtime=runtime, + name=randstr("name_invalid"), ) assert "Unable to find additional include ../invalid/file/path" in str(e.value) @@ -398,7 +439,7 @@ def test_run_bulk_without_retry(self, remote_client): remote_client.runs.create_or_update(run=mock_run) assert mock_request.call_count == 4 - def test_pf_run_with_env_var(self, remote_client, pf): + def test_pf_run_with_env_var(self, pf: PFClient, randstr: Callable[[str], str]): def create_or_update(run, **kwargs): # make run.flow a datastore path uri, so that it can be parsed by AzureMLDatastorePathUri run.flow = "azureml://datastores/workspaceblobstore/paths/LocalUpload/not/important/path" @@ -411,10 +452,11 @@ def create_or_update(run, **kwargs): flow=f"{FLOWS_DIR}/print_env_var", data=f"{DATAS_DIR}/env_var_names.jsonl", environment_variables=env_var, + name=randstr("name"), ) assert run._to_rest_object().environment_variables == env_var - def test_automatic_runtime(self, remote_client, pf): + def test_automatic_runtime(self, pf: PFClient, randstr: Callable[[str], str]): from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller def submit(*args, **kwargs): @@ -432,6 +474,7 @@ def submit(*args, **kwargs): pf.run( flow=f"{FLOWS_DIR}/print_env_var", data=f"{DATAS_DIR}/env_var_names.jsonl", + name=randstr("name1"), ) with patch.object(FlowServiceCaller, "submit_bulk_run") as mock_submit, patch.object( @@ -439,9 +482,14 @@ def submit(*args, **kwargs): ), patch.object(FlowServiceCaller, "create_flow_session"): mock_submit.side_effect = submit # automatic is a reserved runtime name, will use automatic runtime if specified. 
- pf.run(flow=f"{FLOWS_DIR}/print_env_var", data=f"{DATAS_DIR}/env_var_names.jsonl", runtime="automatic") + pf.run( + flow=f"{FLOWS_DIR}/print_env_var", + data=f"{DATAS_DIR}/env_var_names.jsonl", + runtime="automatic", + name=randstr("name2"), + ) - def test_automatic_runtime_with_environment(self, pf): + def test_automatic_runtime_with_environment(self, pf: PFClient, randstr: Callable[[str], str]): from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller def submit(*args, **kwargs): @@ -458,16 +506,18 @@ def submit(*args, **kwargs): pf.run( flow=f"{FLOWS_DIR}/flow_with_environment", data=f"{DATAS_DIR}/env_var_names.jsonl", + name=randstr("name"), ) - def test_run_data_not_provided(self, pf): + def test_run_data_not_provided(self, pf: PFClient, randstr: Callable[[str], str]): with pytest.raises(ValueError) as e: pf.run( flow=f"{FLOWS_DIR}/web_classification", + name=randstr("name"), ) assert "at least one of data or run must be provided" in str(e) - def test_run_without_dump(self, remote_client: PFClient, pf, runtime: str) -> None: + def test_run_without_dump(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]) -> None: from promptflow._sdk._errors import RunNotFoundError from promptflow._sdk._orm.run_info import RunInfo @@ -477,12 +527,13 @@ def test_run_without_dump(self, remote_client: PFClient, pf, runtime: str) -> No column_mapping={"url": "${data.url}"}, variant="${summarize_text_content.variant_0}", runtime=runtime, + name=randstr("name"), ) # cloud run should not dump to database with pytest.raises(RunNotFoundError): RunInfo.get(run.name) - def test_input_mapping_with_dict(self, pf, runtime: str): + def test_input_mapping_with_dict(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): data_path = f"{DATAS_DIR}/webClassification3.jsonl" run = pf.run( @@ -490,21 +541,23 @@ def test_input_mapping_with_dict(self, pf, runtime: str): data=data_path, column_mapping=dict(key={"a": 1}, extra="${data.url}"), runtime=runtime, + name=randstr("name"), ) assert '"{\\"a\\": 1}"' in run.properties["azureml.promptflow.inputs_mapping"] run = pf.runs.stream(run=run) assert run.status == "Completed" - def test_get_invalid_run_cases(self, pf): + def test_get_invalid_run_cases(self, pf: PFClient, randstr: Callable[[str], str]): # test get invalid run type with pytest.raises(InvalidRunError, match="expected 'str' or 'Run' object"): pf.runs.get(run=object()) # test get invalid run name - non_exist_run = str(uuid.uuid4()) + non_exist_run = randstr("non_exist_run") with pytest.raises(RunNotFoundError, match=f"Run {non_exist_run!r} not found"): pf.runs.get(run=non_exist_run) + # TODO: need to confirm whether this is an end-to-end test def test_exp_id(self): with TemporaryDirectory() as tmp_dir: shutil.copytree(f"{FLOWS_DIR}/flow_with_dict_input", f"{tmp_dir}/flow dir with space") @@ -523,7 +576,7 @@ def test_exp_id(self): rest_run = run._to_rest_object() assert rest_run.run_experiment_name == "flow_dir_with_dash" - def test_tools_json_ignored(self, pf): + def test_tools_json_ignored(self, pf: PFClient, randstr: Callable[[str], str]): from azure.ai.ml._artifacts._blob_storage_helper import BlobStorageClient from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller @@ -545,6 +598,7 @@ def fake_upload_file(storage_client, source: str, dest, *args, **kwargs): data=data_path, column_mapping={"key": {"value": "1"}, "url": "${data.url}"}, runtime="fake_runtime", + name=randstr("name"), ) # make sure .promptflow/flow.tools.json not uploaded @@ 
-552,44 +606,7 @@ def fake_upload_file(storage_client, source: str, dest, *args, **kwargs): if ".promptflow/flow.tools.json" in f: raise Exception(f"flow.tools.json should not be uploaded, got {f}") - def test_automatic_runtime_creation_user_aml_token(self, pf): - from azure.core.pipeline import Pipeline - - def submit(*args, **kwargs): - assert "aml-user-token" in args[0].headers - - fake_response = MagicMock() - fake_response.http_response.status_code = 200 - return fake_response - - with patch.object(Pipeline, "run") as mock_session_create: - mock_session_create.side_effect = submit - pf.runs._resolve_runtime( - run=Run( - flow=Path(f"{FLOWS_DIR}/flow_with_environment"), - data=f"{DATAS_DIR}/env_var_names.jsonl", - ), - flow_path=Path(f"{FLOWS_DIR}/flow_with_environment"), - runtime=None, - ) - - def test_submit_run_user_aml_token(self, pf, runtime): - from promptflow.azure._restclient.flow.operations import BulkRunsOperations - - def submit(*args, **kwargs): - headers = kwargs.get("headers", None) - assert "aml-user-token" in headers - - with patch.object(BulkRunsOperations, "submit_bulk_run") as mock_submit, patch.object(RunOperations, "get"): - mock_submit.side_effect = submit - pf.run( - flow=f"{FLOWS_DIR}/flow_with_dict_input", - data=f"{DATAS_DIR}/webClassification3.jsonl", - column_mapping={"key": {"value": "1"}, "url": "${data.url}"}, - runtime=runtime, - ) - - def test_flow_id_in_submission(self, remote_client, pf, runtime): + def test_flow_id_in_submission(self, pf: PFClient, runtime: str, randstr: Callable[[str], str]): from promptflow.azure._restclient.flow_service_caller import FlowServiceCaller flow_path = f"{FLOWS_DIR}/print_env_var" @@ -611,6 +628,7 @@ def submit(*args, **kwargs): flow=flow_path, data=f"{DATAS_DIR}/env_var_names.jsonl", runtime=runtime, + name=randstr("name1"), ) with patch.object(FlowServiceCaller, "submit_bulk_run") as mock_submit, patch.object( @@ -621,6 +639,7 @@ def submit(*args, **kwargs): pf.run( flow=flow_path, data=f"{DATAS_DIR}/env_var_names.jsonl", + name=randstr("name2"), ) @pytest.mark.skip(reason="temporarily disable this for service-side error.") @@ -637,7 +656,7 @@ def test_automatic_runtime_creation_failure(self, pf): ) assert "Session creation failed for" in str(e.value) - def test_run_submission_exception(self, remote_client): + def test_run_submission_exception(self, pf: PFClient): from azure.core.exceptions import HttpResponseError from promptflow.azure._restclient.flow.operations import BulkRunsOperations @@ -646,7 +665,7 @@ def fake_submit(*args, **kwargs): headers = kwargs.get("headers", None) request_id_in_headers = headers["x-ms-client-request-id"] # request id in headers should be same with request id in service caller - assert request_id_in_headers == remote_client.runs._service_caller._request_id + assert request_id_in_headers == pf.runs._service_caller._request_id raise HttpResponseError("customized error message.") with patch.object(BulkRunsOperations, "submit_bulk_run") as mock_request, patch.object( @@ -654,34 +673,82 @@ def fake_submit(*args, **kwargs): ): mock_request.side_effect = fake_submit with pytest.raises(FlowRequestException) as e: - original_request_id = remote_client.runs._service_caller._request_id - remote_client.runs._service_caller.submit_bulk_run( + original_request_id = pf.runs._service_caller._request_id + pf.runs._service_caller.submit_bulk_run( subscription_id="fake_subscription_id", resource_group_name="fake_resource_group", workspace_name="fake_workspace_name", ) # request id has been updated - 
assert original_request_id != remote_client.runs._service_caller._request_id + assert original_request_id != pf.runs._service_caller._request_id # original error message should be included in FlowRequestException assert "customized error message" in str(e.value) # request id should be included in FlowRequestException - assert f"request id: {remote_client.runs._service_caller._request_id}" in str(e.value) + assert f"request id: {pf.runs._service_caller._request_id}" in str(e.value) - def test_get_detail_against_partial_fail_run(self, remote_client, pf, runtime) -> None: + def test_get_detail_against_partial_fail_run( + self, pf: PFClient, runtime: str, randstr: Callable[[str], str] + ) -> None: run = pf.run( flow=f"{FLOWS_DIR}/partial_fail", data=f"{FLOWS_DIR}/partial_fail/data.jsonl", runtime=runtime, + name=randstr("name"), ) pf.runs.stream(run=run.name) - detail = remote_client.get_details(run=run.name) + detail = pf.get_details(run=run.name) assert len(detail) == 3 - def test_vnext_workspace_base_url(self, pf): + # TODO: seems another unit test... + def test_vnext_workspace_base_url(self): from promptflow.azure._restclient.service_caller_factory import _FlowServiceCallerFactory mock_workspace = MagicMock() mock_workspace.discovery_url = "https://promptflow.azure-api.net/discovery/workspaces/fake_workspace_id" service_caller = _FlowServiceCallerFactory.get_instance(workspace=mock_workspace, credential=MagicMock()) assert service_caller.caller._client._base_url == "https://promptflow.azure-api.net/" + + +# separate some tests as they cannot use the fixture that mocks the aml-user-token +@pytest.mark.skipif(condition=not is_live(), reason="aml-user-token will be mocked") +@pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD) +@pytest.mark.e2etest +@pytest.mark.usefixtures("single_worker_thread_pool", "vcr_recording") +class TestFlowRunRelatedToAMLToken: + def test_automatic_runtime_creation_user_aml_token(self, pf: PFClient): + from azure.core.pipeline import Pipeline + + def submit(*args, **kwargs): + assert "aml-user-token" in args[0].headers + + fake_response = MagicMock() + fake_response.http_response.status_code = 200 + return fake_response + + with patch.object(Pipeline, "run") as mock_session_create: + mock_session_create.side_effect = submit + pf.runs._resolve_runtime( + run=Run( + flow=Path(f"{FLOWS_DIR}/flow_with_environment"), + data=f"{DATAS_DIR}/env_var_names.jsonl", + ), + flow_path=Path(f"{FLOWS_DIR}/flow_with_environment"), + runtime=None, + ) + + def test_submit_run_user_aml_token(self, pf, runtime): + from promptflow.azure._restclient.flow.operations import BulkRunsOperations + + def submit(*args, **kwargs): + headers = kwargs.get("headers", None) + assert "aml-user-token" in headers + + with patch.object(BulkRunsOperations, "submit_bulk_run") as mock_submit, patch.object(RunOperations, "get"): + mock_submit.side_effect = submit + pf.run( + flow=f"{FLOWS_DIR}/flow_with_dict_input", + data=f"{DATAS_DIR}/webClassification3.jsonl", + column_mapping={"key": {"value": "1"}, "url": "${data.url}"}, + runtime=runtime, + ) diff --git a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_telemetry.py b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_telemetry.py index 712baca03d4..2758c92c0c7 100644 --- a/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_telemetry.py +++ b/src/promptflow/tests/sdk_cli_azure_test/e2etests/test_telemetry.py @@ -17,6 +17,7 @@ from promptflow._utils.utils import environment_variable_overwrite from .._azure_utils 
import DEFAULT_TEST_TIMEOUT, PYTEST_TIMEOUT_METHOD
+from ..recording_utilities import is_live


@contextlib.contextmanager
@@ -43,6 +44,7 @@ def cli_eu_config_overwrite():
     config.set_config(Configuration.EU_USER, False)


+@pytest.mark.skipif(condition=not is_live(), reason="telemetry tests, only run in live mode.")
 @pytest.mark.timeout(timeout=DEFAULT_TEST_TIMEOUT, method=PYTEST_TIMEOUT_METHOD)
 @pytest.mark.e2etest
 class TestTelemetry:
diff --git a/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/__init__.py b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/__init__.py
new file mode 100644
index 00000000000..948e25e8fa4
--- /dev/null
+++ b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/__init__.py
@@ -0,0 +1,13 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from .bases import PFAzureIntegrationTestRecording
+from .utils import get_pf_client_for_playback, is_live, is_live_and_not_recording
+
+__all__ = [
+    "PFAzureIntegrationTestRecording",
+    "get_pf_client_for_playback",
+    "is_live",
+    "is_live_and_not_recording",
+]
diff --git a/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/bases.py b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/bases.py
new file mode 100644
index 00000000000..b2bb6a16f2f
--- /dev/null
+++ b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/bases.py
@@ -0,0 +1,227 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+import inspect
+import json
+from pathlib import Path
+from typing import Dict, List
+
+import vcr
+from vcr.request import Request
+
+from .constants import FILTER_HEADERS, TEST_CLASSES_FOR_RUN_INTEGRATION_TEST_RECORDING, SanitizedValues
+from .processors import (
+    AzureOpenAIConnectionProcessor,
+    AzureResourceProcessor,
+    AzureWorkspaceTriadProcessor,
+    DropProcessor,
+    RecordingProcessor,
+    StorageProcessor,
+    TenantProcessor,
+)
+from .utils import is_live, is_live_and_not_recording, sanitize_upload_hash
+from .variable_recorder import VariableRecorder
+
+
+class PFAzureIntegrationTestRecording:
+    def __init__(self, test_class, test_func_name: str, tenant_id: str):
+        self.test_class = test_class
+        self.test_func_name = test_func_name
+        self.tenant_id = tenant_id
+        self.is_live = is_live()
+        self.recording_file = self._get_recording_file()
+        self.recording_processors = self._get_recording_processors()
+        self.replay_processors = self._get_replay_processors()
+        self.vcr = self._init_vcr()
+        self._cm = None  # context manager from VCR
+        self.cassette = None
+        self.variable_recorder = VariableRecorder()
+
+    @staticmethod
+    def from_test_case(test_class, test_func_name: str, **kwargs) -> "PFAzureIntegrationTestRecording":
+        test_class_name = test_class.__name__
+        tenant_id = kwargs.get("tenant_id", "")
+        if test_class_name in TEST_CLASSES_FOR_RUN_INTEGRATION_TEST_RECORDING:
+            return PFAzureRunIntegrationTestRecording(test_class, test_func_name, tenant_id=tenant_id)
+        else:
+            return PFAzureIntegrationTestRecording(test_class, test_func_name, tenant_id=tenant_id)
+
+    def _get_recording_file(self) -> Path:
+        # recording files are expected to be located at "tests/test_configs/recordings"
+        # test file path should be located at "tests/sdk_cli_azure_test/e2etests"
+        test_file_path = Path(inspect.getfile(self.test_class)).resolve()
+        recording_dir = (test_file_path.parent.parent.parent / "test_configs" / "recordings").resolve()
+        recording_dir.mkdir(exist_ok=True)
+
+        test_file_name = test_file_path.stem
+        test_class_name = self.test_class.__name__
+        if "[" in self.test_func_name:
+            # for tests that use pytest.mark.parametrize, there will be "[]" in test function name
+            # recording filename pattern:
+            # {test_file_name}_{test_class_name}_{test_func_name}/{parameter_id}.yaml
+            test_func_name, parameter_id = self.test_func_name.split("[")
+            parameter_id = parameter_id.rstrip("]")
+            test_func_dir = (recording_dir / f"{test_file_name}_{test_class_name}_{test_func_name}").resolve()
+            test_func_dir.mkdir(exist_ok=True)
+            recording_file = (test_func_dir / f"{parameter_id}.yaml").resolve()
+        else:
+            # for most remaining tests
+            # recording filename pattern: {test_file_name}_{test_class_name}_{test_func_name}.yaml
+            recording_filename = f"{test_file_name}_{test_class_name}_{self.test_func_name}.yaml"
+            recording_file = (recording_dir / recording_filename).resolve()
+        if self.is_live and not is_live_and_not_recording() and recording_file.is_file():
+            recording_file.unlink()
+        return recording_file
+
+    def _init_vcr(self) -> vcr.VCR:
+        return vcr.VCR(
+            cassette_library_dir=self.recording_file.parent.as_posix(),
+            before_record_request=self._process_request_recording,
+            before_record_response=self._process_response_recording,
+            decode_compressed_response=True,
+            record_mode="none" if not self.is_live else "all",
+            filter_headers=FILTER_HEADERS,
+        )
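+
+    # note on the record modes above (standard vcrpy semantics): "all" always
+    # calls the real service and rewrites the cassette, while "none" never
+    # records and only replays existing cassettes, raising on any unrecorded
+    # interaction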
+    def enter_vcr(self):
+        self._cm = self.vcr.use_cassette(self.recording_file.as_posix())
+        self.cassette = self._cm.__enter__()
+
+    def exit_vcr(self):
+        if self.is_live and not is_live_and_not_recording():
+            self._postprocess_recording()
+        self._cm.__exit__()
+
+    def _process_request_recording(self, request: Request) -> Request:
+        if is_live_and_not_recording():
+            return request
+
+        if self.is_live:
+            for processor in self.recording_processors:
+                request = processor.process_request(request)
+        else:
+            for processor in self.replay_processors:
+                request = processor.process_request(request)
+        return request
+
+    def _process_response_recording(self, response: Dict) -> Dict:
+        if is_live_and_not_recording():
+            return response
+
+        response["body"]["string"] = response["body"]["string"].decode("utf-8")
+        if self.is_live:
+            # lowercase and filter some headers
+            headers = {}
+            for k in response["headers"]:
+                if k.lower() not in FILTER_HEADERS:
+                    headers[k.lower()] = response["headers"][k]
+            response["headers"] = headers
+
+            for processor in self.recording_processors:
+                response = processor.process_response(response)
+        else:
+            for processor in self.replay_processors:
+                response = processor.process_response(response)
+        response["body"]["string"] = response["body"]["string"].encode("utf-8")
+        return response
+
+    def _get_recording_processors(self) -> List[RecordingProcessor]:
+        return [
+            AzureOpenAIConnectionProcessor(),
+            AzureResourceProcessor(),
+            AzureWorkspaceTriadProcessor(),
+            DropProcessor(),
+            TenantProcessor(tenant_id=self.tenant_id),
+        ]
+
+    def _get_replay_processors(self) -> List[RecordingProcessor]:
+        return []
+
+    def get_or_record_variable(self, variable: str, default: str) -> str:
+        if is_live():
+            return self.variable_recorder.get_or_record_variable(variable, default)
+        else:
+            # during playback, return the variable itself, which is expected to be already sanitized
+            return variable
+
+    def _postprocess_recording(self) -> None:
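+        # invoked from exit_vcr before the VCR context manager exits: apply the
+        # variable replacements collected during a live run to every recorded
+        # request/response pair, so only sanitized values reach the cassette on disk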
+        self._apply_replacement_for_recordings()
+        return
+
+    def _apply_replacement_for_recordings(self) -> None:
+        for i in range(len(self.cassette.data)):
+            req, resp = self.cassette.data[i]
+            req = self.variable_recorder.sanitize_request(req)
+            resp = self.variable_recorder.sanitize_response(resp)
+            self.cassette.data[i] = (req, resp)
+        return
+
+
+class PFAzureRunIntegrationTestRecording(PFAzureIntegrationTestRecording):
+    def _init_vcr(self) -> vcr.VCR:
+        _vcr = super(PFAzureRunIntegrationTestRecording, self)._init_vcr()
+        _vcr.register_matcher("path", self._custom_request_path_matcher)
+        _vcr.register_matcher("body", self._custom_request_body_matcher)
+        return _vcr
+
+    def enter_vcr(self):
+        self._cm = self.vcr.use_cassette(
+            self.recording_file.as_posix(),
+            allow_playback_repeats=True,
+            filter_query_parameters=["api-version"],
+        )
+        self.cassette = self._cm.__enter__()
+
+    def _get_recording_processors(self) -> List[RecordingProcessor]:
+        recording_processors = super(PFAzureRunIntegrationTestRecording, self)._get_recording_processors()
+        recording_processors.append(StorageProcessor())
+        return recording_processors
+
+    def _postprocess_recording(self) -> None:
+        self._drop_duplicate_recordings()
+        super(PFAzureRunIntegrationTestRecording, self)._postprocess_recording()
+
+    def _drop_duplicate_recordings(self) -> None:
+        dropped_recordings = []
+        run_data_requests = dict()
+        log_content_requests = dict()
+        for req, resp in self.cassette.data:
+            # run history's rundata API
+            if str(req.path).endswith("/rundata"):
+                body = req.body.decode("utf-8")
+                body_dict = json.loads(body)
+                name = body_dict["runId"]
+                run_data_requests[name] = (req, resp)
+                continue
+            if str(req.path).endswith("/logContent"):
+                log_content_requests[req.uri] = (req, resp)
+                continue
+            dropped_recordings.append((req, resp))
+        # append rundata recording(s)
+        for req, resp in run_data_requests.values():
+            dropped_recordings.append((req, resp))
+        for req, resp in log_content_requests.values():
+            dropped_recordings.append((req, resp))
+
+        self.cassette.data = dropped_recordings
+        return
+
+    def _custom_request_path_matcher(self, r1: Request, r2: Request) -> bool:
+        # for blob storage request, sanitize the upload hash in path
+        if r1.host == r2.host and r1.host == SanitizedValues.BLOB_STORAGE_REQUEST_HOST:
+            return sanitize_upload_hash(r1.path) == r2.path
+        return r1.path == r2.path
+
+    def _custom_request_body_matcher(self, r1: Request, r2: Request) -> bool:
+        if r1.path == r2.path:
+            # /BulkRuns/submit - submit run, match by "runId" in body
+            # /rundata - get run, match by "runId" in body
+            if r1.path.endswith("/BulkRuns/submit") or r1.path.endswith("/rundata"):
+                return r1.body.get("runId") == r2.body.get("runId")
+            else:
+                # we don't match by body for other requests, so return True
+                return True
+        else:
+            # paths do not match, so this pair shall not match
+            return False
diff --git a/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/constants.py b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/constants.py
new file mode 100644
index 00000000000..ea71e9bc134
--- /dev/null
+++ b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/constants.py
@@ -0,0 +1,60 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# --------------------------------------------------------- + +TEST_RUN_LIVE = "PROMPT_FLOW_TEST_RUN_LIVE" +SKIP_LIVE_RECORDING = "PROMPT_FLOW_SKIP_LIVE_RECORDING" + +FILTER_HEADERS = [ + "aml-user-token", + "authorization", + "date", + "etag", + "request-context", + "x-aml-cluster", + "x-ms-access-tier", + "x-ms-access-tier-inferred", + "x-ms-client-request-id", + "x-ms-client-session-id", + "x-ms-client-user-type", + "x-ms-correlation-request-id", + "x-ms-lease-state", + "x-ms-lease-status", + "x-ms-server-encrypted", + "x-ms-ratelimit-remaining-subscription-reads", + "x-ms-ratelimit-remaining-subscription-writes", + "x-ms-response-type", + "x-ms-request-id", + "x-ms-routing-request-id", + "x-msedge-ref", +] + + +class SanitizedValues: + SUBSCRIPTION_ID = "00000000-0000-0000-0000-000000000000" + RESOURCE_GROUP_NAME = "00000" + WORKSPACE_NAME = "00000" + TENANT_ID = "00000000-0000-0000-0000-000000000000" + # workspace + DISCOVERY_URL = "https://eastus.api.azureml.ms/discovery" + # datastore + FAKE_KEY = "this is fake key" + FAKE_ACCOUNT_NAME = "fake_account_name" + FAKE_CONTAINER_NAME = "fake-container-name" + # aoai connection + FAKE_API_BASE = "https://fake.openai.azure.com" + # storage + UPLOAD_HASH = "000000000000000000000000000000000000" + BLOB_STORAGE_REQUEST_HOST = "fake_account_name.blob.core.windows.net" + + +class AzureMLResourceTypes: + CONNECTION = "Microsoft.MachineLearningServices/workspaces/connections" + DATASTORE = "Microsoft.MachineLearningServices/workspaces/datastores" + WORKSPACE = "Microsoft.MachineLearningServices/workspaces" + + +TEST_CLASSES_FOR_RUN_INTEGRATION_TEST_RECORDING = [ + "TestCliWithAzure", + "TestFlowRun", +] diff --git a/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/processors.py b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/processors.py new file mode 100644 index 00000000000..ddd3525fb7b --- /dev/null +++ b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/processors.py @@ -0,0 +1,179 @@ +# --------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# --------------------------------------------------------- + +import base64 +import json +from typing import Dict + +from vcr.request import Request + +from .constants import AzureMLResourceTypes, SanitizedValues +from .utils import ( + is_json_payload_request, + is_json_payload_response, + sanitize_azure_workspace_triad, + sanitize_upload_hash, +) + + +class RecordingProcessor: + def process_request(self, request: Request) -> Request: + return request + + def process_response(self, response: Dict) -> Dict: + return response + + +class AzureWorkspaceTriadProcessor(RecordingProcessor): + """Sanitize subscription id, resource group name and workspace name.""" + + def process_request(self, request: Request) -> Request: + request.uri = sanitize_azure_workspace_triad(request.uri) + return request + + def process_response(self, response: Dict) -> Dict: + response["body"]["string"] = sanitize_azure_workspace_triad(response["body"]["string"]) + return response + + +class AzureResourceProcessor(RecordingProcessor): + """Sanitize sensitive data in Azure resource GET response.""" + + def __init__(self): + # datastore related + self.storage_account_names = set() + self.storage_container_names = set() + + def _sanitize_request_url_for_storage(self, uri: str) -> str: + # this instance will store storage account names and container names + # so we can apply the sanitization here with simple string replace rather than regex + for account_name in self.storage_account_names: + uri = uri.replace(account_name, SanitizedValues.FAKE_ACCOUNT_NAME) + for container_name in self.storage_container_names: + uri = uri.replace(container_name, SanitizedValues.FAKE_CONTAINER_NAME) + return uri + + def process_request(self, request: Request) -> Request: + request.uri = self._sanitize_request_url_for_storage(request.uri) + return request + + def _sanitize_response_body(self, body: Dict) -> Dict: + resource_type = body.get("type") + if resource_type == AzureMLResourceTypes.WORKSPACE: + body = self._sanitize_response_for_workspace(body) + elif resource_type == AzureMLResourceTypes.CONNECTION: + body = self._sanitize_response_for_arm_connection(body) + elif resource_type == AzureMLResourceTypes.DATASTORE: + body = self._sanitize_response_for_datastore(body) + return body + + def process_response(self, response: Dict) -> Dict: + if is_json_payload_response(response): + body = json.loads(response["body"]["string"]) + if isinstance(body, dict): + # response can be a list sometimes (e.g. 
get workspace datastores)
+                # need to sanitize each with a for loop
+                if "value" in body:
+                    resources = body["value"]
+                    for i in range(len(resources)):
+                        resources[i] = self._sanitize_response_body(resources[i])
+                    body["value"] = resources
+                else:
+                    body = self._sanitize_response_body(body)
+            response["body"]["string"] = json.dumps(body)
+        return response
+
+    def _sanitize_response_for_workspace(self, body: Dict) -> Dict:
+        filter_keys = ["identity", "properties", "systemData"]
+        for k in filter_keys:
+            if k in body:
+                body.pop(k)
+
+        # needed during the constructor of FlowServiceCaller (for vNet case)
+        body["properties"] = {"discoveryUrl": SanitizedValues.DISCOVERY_URL}
+
+        name = body["name"]
+        body["name"] = SanitizedValues.WORKSPACE_NAME
+        body["id"] = body["id"].replace(name, SanitizedValues.WORKSPACE_NAME)
+        return body
+
+    def _sanitize_response_for_arm_connection(self, body: Dict) -> Dict:
+        if body["properties"]["authType"] == "CustomKeys":
+            # custom connection, sanitize "properties.credentials.keys"
+            body["properties"]["credentials"]["keys"] = {}
+        else:
+            # others, sanitize "properties.credentials.key"
+            body["properties"]["credentials"]["key"] = "_"
+        body["properties"]["target"] = "_"
+        return body
+
+    def _sanitize_response_for_datastore(self, body: Dict) -> Dict:
+        body["properties"]["subscriptionId"] = SanitizedValues.SUBSCRIPTION_ID
+        body["properties"]["resourceGroup"] = SanitizedValues.RESOURCE_GROUP_NAME
+        self.storage_account_names.add(body["properties"]["accountName"])
+        self.storage_container_names.add(body["properties"]["containerName"])
+        body["properties"]["accountName"] = SanitizedValues.FAKE_ACCOUNT_NAME
+        body["properties"]["containerName"] = SanitizedValues.FAKE_CONTAINER_NAME
+        return body
+
+
+class AzureOpenAIConnectionProcessor(RecordingProcessor):
+    """Sanitize api_base in AOAI connection GET response."""
+
+    def process_response(self, response: Dict) -> Dict:
+        if is_json_payload_response(response):
+            body = json.loads(response["body"]["string"])
+            if isinstance(body, dict) and body.get("connectionType") == "AzureOpenAI":
+                body["configs"]["api_base"] = SanitizedValues.FAKE_API_BASE
+                response["body"]["string"] = json.dumps(body)
+        return response
+
+
+class StorageProcessor(RecordingProcessor):
+    """Sanitize sensitive data during storage operations when submitting a run."""
+
+    def process_request(self, request: Request) -> Request:
+        request.uri = sanitize_upload_hash(request.uri)
+        if is_json_payload_request(request) and request.body is not None:
+            body = request.body.decode("utf-8")
+            body = sanitize_upload_hash(body)
+            request.body = body.encode("utf-8")
+        return request
+
+    def process_response(self, response: Dict) -> Dict:
+        if is_json_payload_response(response):
+            body = json.loads(response["body"]["string"])
+            if isinstance(body, dict):
+                self._sanitize_list_secrets_response(body)
+                response["body"]["string"] = json.dumps(body)
+        return response
+
+    def _sanitize_list_secrets_response(self, body: Dict) -> Dict:
+        if "key" in body:
+            b64_key = base64.b64encode(SanitizedValues.FAKE_KEY.encode("ascii"))
+            body["key"] = str(b64_key, "ascii")
+        return body
+
+
+class DropProcessor(RecordingProcessor):
+    """Ignore some requests that won't be used during playback."""
+
+    def process_request(self, request: Request) -> Request:
+        if "/metadata/identity/oauth2/token" in request.path:
+            return None
+        return request
+
+
+class TenantProcessor(RecordingProcessor):
+    """Sanitize tenant id in responses."""
+
+    def __init__(self, tenant_id: str):
+        self.tenant_id = tenant_id
+
+    def process_response(self, response: Dict) -> Dict:
+        if is_json_payload_response(response):
+            response["body"]["string"] = str(response["body"]["string"]).replace(
+                self.tenant_id, SanitizedValues.TENANT_ID
+            )
+        return response
diff --git a/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/utils.py b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/utils.py
new file mode 100644
index 00000000000..7afc1c3c574
--- /dev/null
+++ b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/utils.py
@@ -0,0 +1,143 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+import os
+import re
+from dataclasses import dataclass
+from typing import Dict
+
+from azure.ai.ml import MLClient
+from azure.ai.ml.entities import Workspace
+from azure.core.credentials import AccessToken
+from vcr.request import Request
+
+from promptflow.azure import PFClient
+
+from .constants import SKIP_LIVE_RECORDING, TEST_RUN_LIVE, SanitizedValues
+
+
+def is_live() -> bool:
+    return os.getenv(TEST_RUN_LIVE, "true") == "true"
+
+
+def is_live_and_not_recording() -> bool:
+    return is_live() and os.getenv(SKIP_LIVE_RECORDING, "true") == "true"
+
+
+class FakeTokenCredential:
+    """Adapted from the Azure SDK for Python repository.
+
+    https://github.com/Azure/azure-sdk-for-python/blob/main/tools/azure-sdk-tools/devtools_testutils/fake_credentials.py
+    """
+
+    def __init__(self):
+        self.token = AccessToken("YOU SHALL NOT PASS", 0)
+        self.get_token_count = 0
+
+    def get_token(self, *args, **kwargs) -> AccessToken:
+        self.get_token_count += 1
+        return self.token
+
+
+@dataclass
+class MockDatastore:
+    """Mock Datastore class for `DatastoreOperations.get_default().name`."""
+
+    name: str
+
+
+def mock_datastore_get_default(*args, **kwargs) -> MockDatastore:
+    return MockDatastore(name="workspaceblobstore")
+
+
+def mock_workspace_get(*args, **kwargs) -> Workspace:
+    return Workspace(
+        name=SanitizedValues.WORKSPACE_NAME,
+        resource_group=SanitizedValues.RESOURCE_GROUP_NAME,
+        discovery_url=SanitizedValues.DISCOVERY_URL,
+    )
+
+
+def get_pf_client_for_playback() -> PFClient:
+    ml_client = MLClient(
+        credential=FakeTokenCredential(),
+        subscription_id=SanitizedValues.SUBSCRIPTION_ID,
+        resource_group_name=SanitizedValues.RESOURCE_GROUP_NAME,
+        workspace_name=SanitizedValues.WORKSPACE_NAME,
+    )
+    ml_client.datastores.get_default = mock_datastore_get_default
+    ml_client.workspaces.get = mock_workspace_get
+    return PFClient(ml_client=ml_client)
+
+
+def sanitize_azure_workspace_triad(value: str) -> str:
+    sanitized_sub = re.sub(
+        "/(subscriptions)/[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}",
+        r"/\1/{}".format("00000000-0000-0000-0000-000000000000"),
+        value,
+        flags=re.IGNORECASE,
+    )
+    # for the regex patterns for resource group name and workspace name, refer to:
+    # https://learn.microsoft.com/en-us/rest/api/resources/resource-groups/create-or-update?tabs=HTTP
+    sanitized_rg = re.sub(
+        r"/(resourceGroups)/[-\w\._\(\)]+",
+        r"/\1/{}".format("00000"),
+        sanitized_sub,
+        flags=re.IGNORECASE,
+    )
+    sanitized_ws = re.sub(
+        r"/(workspaces)/[-\w\._\(\)]+[/?]",
+        r"/\1/{}/".format("00000"),
+        sanitized_rg,
+        flags=re.IGNORECASE,
+    )
+
+    # workspace name can be the last part of the string
+    # e.g. xxx/Microsoft.MachineLearningServices/workspaces/
+    # apply special handling here to sanitize
+    if sanitized_ws.startswith("https://"):
+        split1, split2 = sanitized_ws.split("/")[-2:]
+        if split1 == "workspaces":
+            sanitized_ws = sanitized_ws.replace(split2, SanitizedValues.WORKSPACE_NAME)
+
+    return sanitized_ws
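+
+
+# a quick illustration of the triad sanitizer (hypothetical, shortened input;
+# "<guid>" stands for any real subscription id matching the pattern above):
+#   sanitize_azure_workspace_triad("/subscriptions/<guid>/resourceGroups/my-rg/workspaces/my-ws/")
+#   -> "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/workspaces/00000/"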
+
+
+def sanitize_upload_hash(value: str) -> str:
+    value = re.sub(
+        r"(az-ml-artifacts)/([0-9a-f]{32})",
+        r"\1/{}".format(SanitizedValues.UPLOAD_HASH),
+        value,
+        flags=re.IGNORECASE,
+    )
+    value = re.sub(
+        r"(LocalUpload)/([0-9a-f]{32})",
+        r"\1/{}".format(SanitizedValues.UPLOAD_HASH),
+        value,
+        flags=re.IGNORECASE,
+    )
+    return value
+
+
+def _is_json_payload(headers: Dict, key: str) -> bool:
+    if not headers:
+        return False
+    content_type = headers.get(key)
+    if not content_type:
+        return False
+    # content-type can be an array, e.g. ["application/json; charset=utf-8"]
+    content_type = content_type[0] if isinstance(content_type, list) else content_type
+    content_type = content_type.split(";")[0].lower()
+    return "application/json" in content_type
+
+
+def is_json_payload_request(request: Request) -> bool:
+    headers = request.headers
+    return _is_json_payload(headers, key="Content-Type")
+
+
+def is_json_payload_response(response: Dict) -> bool:
+    headers = response.get("headers")
+    # PFAzureIntegrationTestRecording will lowercase keys in response headers
+    return _is_json_payload(headers, key="content-type")
diff --git a/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/variable_recorder.py b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/variable_recorder.py
new file mode 100644
index 00000000000..200b7c2c73c
--- /dev/null
+++ b/src/promptflow/tests/sdk_cli_azure_test/recording_utilities/variable_recorder.py
@@ -0,0 +1,36 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+
+from typing import Dict
+
+from vcr.request import Request
+
+from .utils import is_json_payload_request
+
+
+class VariableRecorder:
+    def __init__(self):
+        self.variables = dict()
+
+    def get_or_record_variable(self, variable: str, default: str) -> str:
+        return self.variables.setdefault(variable, default)
+
+    def sanitize_request(self, request: Request) -> Request:
+        request.uri = self._sanitize(request.uri)
+        if is_json_payload_request(request) and request.body is not None:
+            body = request.body.decode("utf-8")
+            body = self._sanitize(body)
+            request.body = body.encode("utf-8")
+        return request
+
+    def sanitize_response(self, response: Dict) -> Dict:
+        response["body"]["string"] = response["body"]["string"].decode("utf-8")
+        response["body"]["string"] = self._sanitize(response["body"]["string"])
+        response["body"]["string"] = response["body"]["string"].encode("utf-8")
+        return response
+
+    def _sanitize(self, value: str) -> str:
+        for k, v in self.variables.items():
+            value = value.replace(v, k)
+        return value
diff --git a/src/promptflow/tests/sdk_cli_azure_test/unittests/test_pf_client.py b/src/promptflow/tests/sdk_cli_azure_test/unittests/test_pf_client.py
new file mode 100644
index 00000000000..b50ccf5d33e
--- /dev/null
+++ b/src/promptflow/tests/sdk_cli_azure_test/unittests/test_pf_client.py
@@ -0,0 +1,75 @@
+# ---------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# ---------------------------------------------------------
+import mock
+import pytest
+from azure.ai.ml.constants._common import AZUREML_RESOURCE_PROVIDER, RESOURCE_ID_FORMAT
+
+from promptflow import PFClient
+from promptflow._sdk.operations._connection_operations import ConnectionOperations
+from promptflow._sdk.operations._local_azure_connection_operations import LocalAzureConnectionOperations
+
+
+@pytest.mark.sdk_test
+@pytest.mark.e2etest
+class TestPFClient:
+    # Test pf client when the connection provider is azureml.
+    # This test suite needs azure dependencies.
+    def test_connection_provider(self):
+        target = "promptflow._sdk._pf_client.Configuration"
+        with mock.patch(target) as mocked:
+            mocked.return_value.get_connection_provider.return_value = "abc"
+            with pytest.raises(ValueError) as e:
+                client = PFClient()
+                assert client.connections
+            assert "Unsupported connection provider" in str(e.value)
+
+        with mock.patch(target) as mocked:
+            mocked.return_value.get_connection_provider.return_value = "azureml:xx"
+            with pytest.raises(ValueError) as e:
+                client = PFClient()
+                assert client.connections
+            assert "Malformed connection provider string" in str(e.value)
+
+        with mock.patch(target) as mocked:
+            mocked.return_value.get_connection_provider.return_value = "local"
+            client = PFClient()
+            assert isinstance(client.connections, ConnectionOperations)
+
+        with mock.patch(target) as mocked:
+            mocked.return_value.get_connection_provider.return_value = "azureml:" + RESOURCE_ID_FORMAT.format(
+                "96aede12-2f73-41cb-b983-6d11a904839b", "promptflow", AZUREML_RESOURCE_PROVIDER, "promptflow-eastus"
+            )
+            client = PFClient()
+            assert isinstance(client.connections, LocalAzureConnectionOperations)
+
+        client = PFClient(
+            config={
+                "connection.provider": "azureml:"
+                + RESOURCE_ID_FORMAT.format(
+                    "96aede12-2f73-41cb-b983-6d11a904839b", "promptflow", AZUREML_RESOURCE_PROVIDER, "promptflow-eastus"
+                )
+            }
+        )
+        assert isinstance(client.connections, LocalAzureConnectionOperations)
+
+    def test_local_azure_connection_extract_workspace(self):
+        res = LocalAzureConnectionOperations._extract_workspace(
+            "azureml:/subscriptions/123/resourceGroups/456/providers/Microsoft.MachineLearningServices/workspaces/789"
+        )
+        assert res == ("123", "456", "789")
+
+        res = LocalAzureConnectionOperations._extract_workspace(
+            "azureml:/subscriptions/123/resourcegroups/456/workspaces/789"
+        )
+        assert res == ("123", "456", "789")
+
+        with pytest.raises(ValueError) as e:
+            LocalAzureConnectionOperations._extract_workspace("azureml:xx")
+        assert "Malformed connection provider string" in str(e.value)
+
+        with pytest.raises(ValueError) as e:
+            LocalAzureConnectionOperations._extract_workspace(
+                "azureml:/subscriptions/123/resourceGroups/456/providers/Microsoft.MachineLearningServices/workspaces/"
+            )
+        assert "Malformed connection provider string" in str(e.value)
diff --git a/src/promptflow/tests/sdk_cli_test/e2etests/test_cli.py b/src/promptflow/tests/sdk_cli_test/e2etests/test_cli.py
index d6e5a307b83..6e3545d9b98 100644
--- a/src/promptflow/tests/sdk_cli_test/e2etests/test_cli.py
+++ b/src/promptflow/tests/sdk_cli_test/e2etests/test_cli.py
@@ -6,6 +6,7 @@
 import logging
 import os
 import os.path
+import subprocess
 import shutil
 import sys
 import tempfile
@@ -20,6 +21,7 @@
 from promptflow._cli._pf.entry import main
 from promptflow._sdk._constants import LOGGER_NAME, SCRUBBED_VALUE
+from promptflow._sdk._errors import RunNotFoundError
 from promptflow._sdk.operations._local_storage_operations import
LocalStorageOperations from promptflow._sdk.operations._run_operations import RunOperations from promptflow._utils.context_utils import _change_working_dir @@ -1119,6 +1121,42 @@ def get_node_settings(_flow_dag_path: Path): ) assert get_node_settings(Path(source)) != get_node_settings(new_flow_dag_path) + def test_flow_build_executable(self): + source = f"{FLOWS_DIR}/web_classification/flow.dag.yaml" + target = "promptflow._sdk.operations._flow_operations.FlowOperations._run_pyinstaller" + with mock.patch(target) as mocked: + mocked.return_value = None + + with tempfile.TemporaryDirectory() as temp_dir: + run_pf_command( + "flow", + "build", + "--source", + source, + "--output", + temp_dir, + "--format", + "executable", + ) + # Start the Python script as a subprocess + app_file = Path(temp_dir, "app.py").as_posix() + process = subprocess.Popen(['python', app_file], stderr=subprocess.PIPE) + try: + # Wait for a specified time (in seconds) + wait_time = 5 + process.wait(timeout=wait_time) + if process.returncode == 0: + pass + else: + raise Exception(f"Process terminated with exit code {process.returncode}, " + f"{process.stderr.read().decode('utf-8')}") + except (subprocess.TimeoutExpired, KeyboardInterrupt): + pass + finally: + # Kill the process + process.terminate() + process.wait() # Ensure the process is fully terminated + @pytest.mark.parametrize( "file_name, expected, update_item", [ @@ -1212,28 +1250,27 @@ def mocked_visualize(*args, **kwargs): name, ) - def test_pf_run_with_stream_log(self): - f = io.StringIO() - # with --stream will show logs in stdout - with contextlib.redirect_stdout(f): - run_pf_command( - "run", - "create", - "--flow", - f"{FLOWS_DIR}/flow_with_user_output", - "--data", - f"{DATAS_DIR}/webClassification3.jsonl", - "--column-mapping", - "key=value", - "extra=${data.url}", - "--stream", - ) - logs = f.getvalue() + def test_pf_run_with_stream_log(self, capfd): + run_pf_command( + "run", + "create", + "--flow", + f"{FLOWS_DIR}/flow_with_user_output", + "--data", + f"{DATAS_DIR}/webClassification3.jsonl", + "--column-mapping", + "key=value", + "extra=${data.url}", + "--stream", + ) + out, _ = capfd.readouterr() # For Batch run, the executor uses bulk logger to print logs, and only prints the error log of the nodes. existing_keywords = ["execution", "execution.bulk", "WARNING", "error log"] - assert all([keyword in logs for keyword in existing_keywords]) non_existing_keywords = ["execution.flow", "user log"] - assert all([keyword not in logs for keyword in non_existing_keywords]) + for keyword in existing_keywords: + assert keyword in out + for keyword in non_existing_keywords: + assert keyword not in out def test_pf_run_no_stream_log(self): f = io.StringIO() @@ -1345,6 +1382,54 @@ def test_tool_init(self, capsys): outerr = capsys.readouterr() assert f"The tool name {invalid_tool_name} is a invalid identifier." 
in outerr.out + # Test init package tool with extra info + package_name = "tool_with_extra_info" + package_folder = Path(temp_dir) / package_name + icon_path = Path(DATAS_DIR) / "logo.jpg" + category = "test_category" + tags = {"tag1": "value1", "tag2": "value2"} + run_pf_command( + "tool", + "init", + "--package", + package_name, + "--tool", + func_name, + "--set", + f"icon={icon_path.absolute()}", + f"category={category}", + f"tags={tags}", + cwd=temp_dir, + ) + spec = importlib.util.spec_from_file_location( + f"{package_name}.utils", package_folder / package_name / "utils.py" + ) + utils = importlib.util.module_from_spec(spec) + spec.loader.exec_module(utils) + + assert hasattr(utils, "list_package_tools") + tools_meta = utils.list_package_tools() + meta = tools_meta[f"{package_name}.{func_name}.{func_name}"] + assert meta["category"] == category + assert meta["tags"] == tags + assert meta["icon"].startswith("data:image") + + # icon doesn't exist + with pytest.raises(SystemExit): + run_pf_command( + "tool", + "init", + "--package", + package_name, + "--tool", + func_name, + "--set", + "icon=invalid_icon_path", + cwd=temp_dir, + ) + outerr = capsys.readouterr() + assert "Cannot find the icon path" in outerr.out + def test_tool_list(self, capsys): # List package tools in environment run_pf_command("tool", "list") @@ -1401,13 +1486,47 @@ def test_flow_test_with_image_input_and_output(self): "flow", "test", "--flow", - f"{FLOWS_DIR}/python_tool_with_image_input_and_output", + f"{FLOWS_DIR}/python_tool_with_simple_image", ) - output_path = Path(FLOWS_DIR) / "python_tool_with_image_input_and_output" / ".promptflow" / "output" + output_path = Path(FLOWS_DIR) / "python_tool_with_simple_image" / ".promptflow" / "output" assert output_path.exists() - image_path = Path(FLOWS_DIR) / "python_tool_with_image_input_and_output" / ".promptflow" / "intermediate" + image_path = Path(FLOWS_DIR) / "python_tool_with_simple_image" / ".promptflow" / "intermediate" assert image_path.exists() + def test_run_file_with_set(self, pf) -> None: + name = str(uuid.uuid4()) + run_pf_command( + "run", + "create", + "--file", + f"{RUNS_DIR}/run_with_env.yaml", + "--set", + f"name={name}", + ) + # run exists + pf.runs.get(name=name) + + def test_run_file_with_set_priority(self, pf) -> None: + # --name has higher priority than --set + name1 = str(uuid.uuid4()) + name2 = str(uuid.uuid4()) + run_pf_command( + "run", + "create", + "--file", + f"{RUNS_DIR}/run_with_env.yaml", + "--set", + f"name={name1}", + "--name", + name2, + ) + # run exists + try: + pf.runs.get(name=name1) + except RunNotFoundError: + pass + pf.runs.get(name=name2) + def test_data_scrubbing(self): # Prepare connection run_pf_command( @@ -1417,6 +1536,7 @@ def test_data_scrubbing(self): f"{CONNECTIONS_DIR}/custom_connection.yaml", "--name", "custom_connection") + # Test flow run run_pf_command( "flow", diff --git a/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_run.py b/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_run.py index 2546edaf478..45fcc45077d 100644 --- a/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_run.py +++ b/src/promptflow/tests/sdk_cli_test/e2etests/test_flow_run.py @@ -37,6 +37,13 @@ def create_run_against_multi_line_data(client) -> Run: ) +def create_run_against_multi_line_data_without_llm(client: PFClient) -> Run: + return client.run( + flow=f"{FLOWS_DIR}/print_env_var", + data=f"{DATAS_DIR}/env_var_names.jsonl", + ) + + def create_run_against_run(client, run: Run) -> Run: return client.run( 
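        # evaluation run (presumably wired via the run parameter and a column
        # mapping elided by this hunk): the classification_accuracy_evaluation
        # flow scores the outputs of the given run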
flow=f"{FLOWS_DIR}/classification_accuracy_evaluation", @@ -745,3 +752,42 @@ def test_system_metrics_in_properties(self, pf) -> None: assert FlowRunProperties.SYSTEM_METRICS in run.properties assert isinstance(run.properties[FlowRunProperties.SYSTEM_METRICS], dict) assert "total_tokens" in run.properties[FlowRunProperties.SYSTEM_METRICS] + + def test_run_get_inputs(self, pf): + # inputs should be persisted when defaults are used + run = pf.run( + flow=f"{FLOWS_DIR}/default_input", + data=f"{DATAS_DIR}/webClassification1.jsonl", + ) + inputs = pf.runs._get_inputs(run=run) + assert inputs == {"line_number": [0], "question": ["input value from default"]} + + # inputs should be persisted when data value are used + run = pf.run( + flow=f"{FLOWS_DIR}/flow_with_dict_input", + data=f"{DATAS_DIR}/dictInput1.jsonl", + ) + inputs = pf.runs._get_inputs(run=run) + assert inputs == {"key": [{"key": "value in data"}], "line_number": [0]} + + # inputs should be persisted when column-mapping are used + run = pf.run( + flow=f"{FLOWS_DIR}/flow_with_dict_input", + data=f"{DATAS_DIR}/webClassification1.jsonl", + column_mapping={"key": {"value": "value in column-mapping"}, "url": "${data.url}"}, + ) + inputs = pf.runs._get_inputs(run=run) + assert inputs == { + "key": [{"value": "value in column-mapping"}], + "line_number": [0], + "url": ["https://www.youtube.com/watch?v=o5ZQyXaAv1g"], + } + + def test_executor_logs_in_batch_run_logs(self, pf: PFClient) -> None: + run = create_run_against_multi_line_data_without_llm(pf) + local_storage = LocalStorageOperations(run=run) + logs = local_storage.logger.get_logs() + # below warning is printed by executor before the batch run executed + # the warning message results from we do not use column mapping + # so it is expected to be printed here + assert "Starting run without column mapping may lead to unexpected results." in logs diff --git a/src/promptflow/tests/sdk_cli_test/unittests/test_pf_client.py b/src/promptflow/tests/sdk_cli_test/unittests/test_pf_client.py index 53ee82f1b9b..15814794611 100644 --- a/src/promptflow/tests/sdk_cli_test/unittests/test_pf_client.py +++ b/src/promptflow/tests/sdk_cli_test/unittests/test_pf_client.py @@ -1,14 +1,10 @@ # --------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. 
# --------------------------------------------------------- -import mock import pytest -from azure.ai.ml.constants._common import AZUREML_RESOURCE_PROVIDER, RESOURCE_ID_FORMAT from promptflow import PFClient from promptflow._core.operation_context import OperationContext -from promptflow._sdk.operations._connection_operations import ConnectionOperations -from promptflow._sdk.operations._local_azure_connection_operations import LocalAzureConnectionOperations @pytest.mark.sdk_test @@ -17,62 +13,3 @@ class TestPFClient: def test_pf_client_user_agent(self): PFClient() assert "promptflow-sdk" in OperationContext.get_instance().get_user_agent() - - def test_connection_provider(self): - target = "promptflow._sdk._pf_client.Configuration" - with mock.patch(target) as mocked: - mocked.return_value.get_connection_provider.return_value = "abc" - with pytest.raises(ValueError) as e: - client = PFClient() - assert client.connections - assert "Unsupported connection provider" in str(e.value) - - with mock.patch(target) as mocked: - mocked.return_value.get_connection_provider.return_value = "azureml:xx" - with pytest.raises(ValueError) as e: - client = PFClient() - assert client.connections - assert "Malformed connection provider string" in str(e.value) - - with mock.patch(target) as mocked: - mocked.return_value.get_connection_provider.return_value = "local" - client = PFClient() - assert isinstance(client.connections, ConnectionOperations) - - with mock.patch(target) as mocked: - mocked.return_value.get_connection_provider.return_value = "azureml:" + RESOURCE_ID_FORMAT.format( - "96aede12-2f73-41cb-b983-6d11a904839b", "promptflow", AZUREML_RESOURCE_PROVIDER, "promptflow-eastus" - ) - client = PFClient() - assert isinstance(client.connections, LocalAzureConnectionOperations) - - client = PFClient( - config={ - "connection.provider": "azureml:" - + RESOURCE_ID_FORMAT.format( - "96aede12-2f73-41cb-b983-6d11a904839b", "promptflow", AZUREML_RESOURCE_PROVIDER, "promptflow-eastus" - ) - } - ) - assert isinstance(client.connections, LocalAzureConnectionOperations) - - def test_local_azure_connection_extract_workspace(self): - res = LocalAzureConnectionOperations._extract_workspace( - "azureml:/subscriptions/123/resourceGroups/456/providers/Microsoft.MachineLearningServices/workspaces/789" - ) - assert res == ("123", "456", "789") - - res = LocalAzureConnectionOperations._extract_workspace( - "azureml:/subscriptions/123/resourcegroups/456/workspaces/789" - ) - assert res == ("123", "456", "789") - - with pytest.raises(ValueError) as e: - LocalAzureConnectionOperations._extract_workspace("azureml:xx") - assert "Malformed connection provider string" in str(e.value) - - with pytest.raises(ValueError) as e: - LocalAzureConnectionOperations._extract_workspace( - "azureml:/subscriptions/123/resourceGroups/456/providers/Microsoft.MachineLearningServices/workspaces/" - ) - assert "Malformed connection provider string" in str(e.value) diff --git a/src/promptflow/tests/test_configs/datas/dictInput1.jsonl b/src/promptflow/tests/test_configs/datas/dictInput1.jsonl new file mode 100644 index 00000000000..4a9c0bcdbb7 --- /dev/null +++ b/src/promptflow/tests/test_configs/datas/dictInput1.jsonl @@ -0,0 +1 @@ +{"key": {"key": "value in data"}} diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/logo.jpg b/src/promptflow/tests/test_configs/datas/logo.jpg similarity index 100% rename from src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/logo.jpg rename to 
src/promptflow/tests/test_configs/datas/logo.jpg diff --git a/src/promptflow/tests/test_configs/flows/chat_flow_with_image/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/chat_flow_with_image/flow.dag.yaml new file mode 100644 index 00000000000..24206afd19e --- /dev/null +++ b/src/promptflow/tests/test_configs/flows/chat_flow_with_image/flow.dag.yaml @@ -0,0 +1,38 @@ +inputs: + chat_history: + type: list + default: + - inputs: + outputs: + answer: answer to the first question + question: + - the first question + - data:image/jpg;path: logo.jpg + - inputs: + outputs: + answer: answer to the second question + question: + - the second question + - data:image/png;path: logo_2.png + is_chat_history: true + question: + type: list + default: + - the third question + - data:image/jpg;path: logo.jpg + - data:image/png;path: logo_2.png + is_chat_input: true +outputs: + output: + type: string + reference: ${mock_chat_node.output} + is_chat_output: true +nodes: +- name: mock_chat_node + type: python + source: + type: code + path: mock_chat.py + inputs: + chat_history: ${inputs.chat_history} + question: ${inputs.question} diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/logo.jpg b/src/promptflow/tests/test_configs/flows/chat_flow_with_image/logo.jpg similarity index 100% rename from src/promptflow/tests/test_configs/flows/python_tool_with_image_list/logo.jpg rename to src/promptflow/tests/test_configs/flows/chat_flow_with_image/logo.jpg diff --git a/src/promptflow/tests/test_configs/flows/chat_flow_with_image/logo_2.png b/src/promptflow/tests/test_configs/flows/chat_flow_with_image/logo_2.png new file mode 100644 index 00000000000..27f294983ff Binary files /dev/null and b/src/promptflow/tests/test_configs/flows/chat_flow_with_image/logo_2.png differ diff --git a/src/promptflow/tests/test_configs/flows/chat_flow_with_image/mock_chat.py b/src/promptflow/tests/test_configs/flows/chat_flow_with_image/mock_chat.py new file mode 100644 index 00000000000..ef8bd4c69bd --- /dev/null +++ b/src/promptflow/tests/test_configs/flows/chat_flow_with_image/mock_chat.py @@ -0,0 +1,11 @@ +from promptflow import tool +from promptflow.contracts.multimedia import Image + + +@tool +def mock_chat(chat_history: list, question: list): + res = [] + for item in question: + if isinstance(item, Image): + res.append(item) + return res diff --git a/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/stream.py b/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/stream.py index 352f8ed6031..5a8601fdfed 100644 --- a/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/stream.py +++ b/src/promptflow/tests/test_configs/flows/chat_flow_with_python_node_streaming_output/stream.py @@ -1,5 +1,5 @@ from promptflow import tool -from typing import Generator +from typing import Generator, List def stream(question: str) -> Generator[str, None, None]: @@ -8,5 +8,5 @@ def stream(question: str) -> Generator[str, None, None]: @tool -def my_python_tool(chat_history: list[dict], question: str) -> dict: +def my_python_tool(chat_history: List[dict], question: str) -> dict: return {"answer": stream(question)} diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/flow.dag.yaml new file mode 100644 index 00000000000..c089b087ed0 --- /dev/null +++ 
b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/flow.dag.yaml @@ -0,0 +1,48 @@ +inputs: + image_list: + type: list + default: + - data:image/jpg;path: logo.jpg + - data:image/png;path: logo_2.png + image_dict: + type: object + default: + image_1: + data:image/jpg;path: logo.jpg + image_2: + data:image/png;path: logo_2.png +outputs: + output: + type: list + reference: ${python_node_3.output} +nodes: +- name: python_node + type: python + source: + type: code + path: passthrough_list.py + inputs: + image_list: ${inputs.image_list} + image_dict: ${inputs.image_dict} +- name: python_node_2 + type: python + source: + type: code + path: passthrough_dict.py + inputs: + image_list: + - data:image/jpg;path: logo.jpg + - data:image/png;path: logo_2.png + image_dict: + image_1: + data:image/jpg;path: logo.jpg + image_2: + data:image/png;path: logo_2.png +- name: python_node_3 + type: python + source: + type: code + path: passthrough_list.py + inputs: + image_list: ${python_node.output} + image_dict: ${python_node_2.output} diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/logo.jpg b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/logo.jpg similarity index 100% rename from src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/logo.jpg rename to src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/logo.jpg diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/logo_2.png b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/logo_2.png new file mode 100644 index 00000000000..27f294983ff Binary files /dev/null and b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/logo_2.png differ diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_dict.py b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_dict.py new file mode 100644 index 00000000000..8c44ebe595e --- /dev/null +++ b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_dict.py @@ -0,0 +1,6 @@ +from promptflow import tool + + +@tool +def passthrough_dict(image_list: list, image_dict: dict): + return image_dict diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_list.py b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_list.py new file mode 100644 index 00000000000..6b4e31f12c1 --- /dev/null +++ b/src/promptflow/tests/test_configs/flows/python_tool_with_composite_image/passthrough_list.py @@ -0,0 +1,5 @@ +from promptflow import tool + +@tool +def passthrough_list(image_list: list, image_dict: dict): + return image_list diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/flow.dag.yaml deleted file mode 100644 index 4dcfeafc95d..00000000000 --- a/src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/flow.dag.yaml +++ /dev/null @@ -1,16 +0,0 @@ -inputs: - image: - type: image - default: logo.jpg -outputs: - output: - type: image - reference: ${python_node.output} -nodes: -- name: python_node - type: python - source: - type: code - path: python_with_image.py - inputs: - image: ${inputs.image} \ No newline at end of file diff --git 
a/src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/python_with_image.py b/src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/python_with_image.py deleted file mode 100644 index 3b0770ebfd6..00000000000 --- a/src/promptflow/tests/test_configs/flows/python_tool_with_image_input_and_output/python_with_image.py +++ /dev/null @@ -1,7 +0,0 @@ -from promptflow.contracts.multimedia import Image -from promptflow import tool - - -@tool -def python_with_image(image: Image) -> Image: - return image diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/flow.dag.yaml deleted file mode 100644 index 5ab71cb5a9c..00000000000 --- a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/flow.dag.yaml +++ /dev/null @@ -1,33 +0,0 @@ -inputs: - image_list: - type: list - default: - - data:image/jpg;path: logo.jpg - - data:image/jpg;path: logo_2.jpg - - data:image/jpg;path: logo_3.jpg - image_dict: - type: object - default: - - image_1: - data:image/jpg;path: logo.jpg - - image_2: - data:image/jpg;path: logo_2.jpg -outputs: - output: - type: list - reference: ${python_node.output} -nodes: -- name: python_node - type: python - source: - type: code - path: pick_images_from_list.py - inputs: - image_list: - - data:image/jpg;path: logo.jpg - - data:image/jpg;path: logo_2.jpg - - data:image/jpg;path: logo_3.jpg - image_list_2: ${inputs.image_list} - image_dict: ${inputs.image_dict} - idx_1: 1 - idx_2: 2 diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/logo_3.JPG b/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/logo_3.JPG deleted file mode 100644 index 155609310c8..00000000000 Binary files a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/logo_3.JPG and /dev/null differ diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/pick_images_from_list.py b/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/pick_images_from_list.py deleted file mode 100644 index 1b439593974..00000000000 --- a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/pick_images_from_list.py +++ /dev/null @@ -1,16 +0,0 @@ -from promptflow import tool -from promptflow.contracts.multimedia import Image - - -@tool -def pick_images_from_list( - image_list: list[Image], - image_list_2: list[Image], - image_dict: dict, - idx_1: int, - idx_2: int -) -> list[Image]: - if idx_1 >= 0 and idx_1 < len(image_list) and idx_2 >= 0 and idx_2 < len(image_list_2): - return {"Image list": [image_list[idx_1], image_list_2[idx_2]], "Image dict": image_dict} - else: - raise Exception(f"Invalid index.") diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/flow.dag.yaml deleted file mode 100644 index 806c5a0c39f..00000000000 --- a/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/flow.dag.yaml +++ /dev/null @@ -1,31 +0,0 @@ -inputs: - image: - type: image - default: logo.jpg - image_name: - type: string - default: Microsoft's logo - logo_content: - type: string - default: Microsoft and four squares -outputs: - output: - type: image - reference: ${python_node_2.output} -nodes: -- name: python_node - type: python - source: - type: code - path: python_with_image.py - inputs: - image: ${inputs.image} - 
image_name: ${inputs.image_name} -- name: python_node_2 - type: python - source: - type: code - path: python_node_2.py - inputs: - image_dict: ${python_node.output} - logo_content: ${inputs.logo_content} diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/python_node_2.py b/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/python_node_2.py deleted file mode 100644 index 68791f1c419..00000000000 --- a/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/python_node_2.py +++ /dev/null @@ -1,9 +0,0 @@ -from promptflow.contracts.multimedia import Image -from promptflow import tool - - -@tool -def python_with_image(image_dict: dict, logo_content: str) -> Image: - image_dict["image_list2"] = [image_dict["image"], image_dict["image"]] - image_dict["logo_content"] = logo_content - return image_dict diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/python_with_image.py b/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/python_with_image.py deleted file mode 100644 index bd0a3fb7072..00000000000 --- a/src/promptflow/tests/test_configs/flows/python_tool_with_multiple_image_nodes/python_with_image.py +++ /dev/null @@ -1,7 +0,0 @@ -from promptflow.contracts.multimedia import Image -from promptflow import tool - - -@tool -def python_with_image(image: Image, image_name: str) -> Image: - return {"image": image, "image_name": image_name, "image_list": [image, image]} diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/flow.dag.yaml new file mode 100644 index 00000000000..248838114eb --- /dev/null +++ b/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/flow.dag.yaml @@ -0,0 +1,25 @@ +inputs: + image: + type: image + default: logo.jpg +outputs: + output: + type: image + reference: ${python_node_2.output} +nodes: +- name: python_node + type: python + source: + type: code + path: pick_an_image.py + inputs: + image_1: ${inputs.image} + image_2: logo_2.png +- name: python_node_2 + type: python + source: + type: code + path: pick_an_image.py + inputs: + image_1: ${python_node.output} + image_2: logo_2.png diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_image_list/logo_2.JPG b/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/logo.jpg similarity index 100% rename from src/promptflow/tests/test_configs/flows/python_tool_with_image_list/logo_2.JPG rename to src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/logo.jpg diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/logo_2.png b/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/logo_2.png new file mode 100644 index 00000000000..27f294983ff Binary files /dev/null and b/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/logo_2.png differ diff --git a/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/pick_an_image.py b/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/pick_an_image.py new file mode 100644 index 00000000000..714282aa2c9 --- /dev/null +++ b/src/promptflow/tests/test_configs/flows/python_tool_with_simple_image/pick_an_image.py @@ -0,0 +1,12 @@ +import random + +from promptflow.contracts.multimedia import Image +from promptflow import tool + + +@tool +def pick_an_image(image_1: Image, image_2: 
diff --git a/src/promptflow/tests/test_configs/flows/script_tool_with_init/flow.dag.yaml b/src/promptflow/tests/test_configs/flows/script_tool_with_init/flow.dag.yaml
new file mode 100644
index 00000000000..84ecb276eff
--- /dev/null
+++ b/src/promptflow/tests/test_configs/flows/script_tool_with_init/flow.dag.yaml
@@ -0,0 +1,17 @@
+inputs:
+  input:
+    type: string
+    default: World
+outputs:
+  output:
+    type: string
+    reference: ${script_tool_with_init.output}
+nodes:
+- name: script_tool_with_init
+  type: python
+  source:
+    type: code
+    path: script_tool_with_init.py
+  inputs:
+    init_input: Hello
+    input: ${inputs.input}
diff --git a/src/promptflow/tests/test_configs/flows/script_tool_with_init/script_tool_with_init.py b/src/promptflow/tests/test_configs/flows/script_tool_with_init/script_tool_with_init.py
new file mode 100644
index 00000000000..cca616b8ba8
--- /dev/null
+++ b/src/promptflow/tests/test_configs/flows/script_tool_with_init/script_tool_with_init.py
@@ -0,0 +1,11 @@
+from promptflow import ToolProvider, tool
+
+
+class ScriptToolWithInit(ToolProvider):
+    def __init__(self, init_input: str):
+        super().__init__()
+        self.init_input = init_input
+
+    @tool
+    def call(self, input: str):
+        return str.join(" ", [self.init_input, input])
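script_tool_with_init covers class-based script tools: the node's init_input is routed into the ToolProvider subclass's __init__, while the remaining inputs go to the decorated call method. A direct-invocation sketch using only the class defined above:

# Direct call of the class-based tool above; when the flow runs, the executor
# performs this wiring from the node inputs declared in flow.dag.yaml.
from script_tool_with_init import ScriptToolWithInit  # the file added above

tool_instance = ScriptToolWithInit(init_input="Hello")
print(tool_instance.call(input="World"))  # -> "Hello World"

The recording files that follow are vcrpy cassettes: each interaction pairs a sanitized request (zeroed subscription IDs, 00000 resource group and workspace names, fake storage account names, placeholder secrets) with its captured response, so the sdk_cli_azure e2e tests can replay Azure traffic offline when PROMPT_FLOW_TEST_RUN_LIVE is "false". A minimal replay sketch, assuming stock vcrpy rather than this PR's actual test harness:

# Replay-only sketch; vcrpy is an assumption here, and the GET simply mirrors
# the first interaction recorded in the cassette below.
import requests
import vcr

cassette = (
    "src/promptflow/tests/test_configs/recordings/"
    "test_arm_connection_operations_TestArmConnectionOperations_test_get_connection.yaml"
)
with vcr.use_cassette(cassette, record_mode="none"):  # "none": never touch the network
    resp = requests.get(
        "https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000"
        "/resourceGroups/00000/providers/Microsoft.MachineLearningServices"
        "/workspaces/00000/api-version=2023-06-01-preview"
    )
    print(resp.status_code)  # 200, served from the recording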
diff --git a/src/promptflow/tests/test_configs/recordings/test_arm_connection_operations_TestArmConnectionOperations_test_get_connection.yaml b/src/promptflow/tests/test_configs/recordings/test_arm_connection_operations_TestArmConnectionOperations_test_get_connection.yaml
new file mode 100644
index 00000000000..4e5baeffc20
--- /dev/null
+++ b/src/promptflow/tests/test_configs/recordings/test_arm_connection_operations_TestArmConnectionOperations_test_get_connection.yaml
@@ -0,0 +1,199 @@
+interactions:
+- request:
+    body: null
+    headers:
+      Accept:
+      - application/json
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      User-Agent:
+      - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0
+        Python/3.10.13 (Windows-10-10.0.22621-SP0)
+    method: GET
+    uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/api-version=2023-06-01-preview
+  response:
+    body:
+      string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000",
+        "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces",
+        "location": "eastus", "tags": {}, "etag": null, "kind": "Default", "sku":
+        {"name": "Basic", "tier": "Basic"}}'
+    headers:
+      cache-control:
+      - no-cache
+      content-length:
+      - '3519'
+      content-type:
+      - application/json; charset=utf-8
+      expires:
+      - '-1'
+      pragma:
+      - no-cache
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains
+      vary:
+      - Accept-Encoding
+      x-cache:
+      - CONFIG_NOCACHE
+      x-content-type-options:
+      - nosniff
+      x-request-time:
+      - '0.022'
+    status:
+      code: 200
+      message: OK
+- request:
+    body: null
+    headers:
+      Accept:
+      - application/json
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      User-Agent:
+      - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0
+        Python/3.10.13 (Windows-10-10.0.22621-SP0)
+    method: GET
+    uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?api-version=2023-04-01-preview&count=30&isDefault=true&orderByAsc=false
+  response:
+    body:
+      string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore",
+        "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores",
+        "properties": {"description": null, "tags": null, "properties": null, "isDefault":
+        true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty":
+        null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup":
+        "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name",
+        "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol":
+        "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"},
+        "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy":
+        "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt":
+        "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a",
+        "lastModifiedByType": "Application"}}]}'
+    headers:
+      cache-control:
+      - no-cache
+      content-length:
+      - '1372'
+      content-type:
+      - application/json; charset=utf-8
+      expires:
+      - '-1'
+      pragma:
+      - no-cache
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains
+      vary:
+      - Accept-Encoding
+      x-cache:
+      - CONFIG_NOCACHE
+      x-content-type-options:
+      - nosniff
+      x-request-time:
+      - '0.268'
+    status:
+      code: 200
+      message: OK
+- request:
+    body: null
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '0'
+      User-Agent:
+      - python-requests/2.31.0
+    method: POST
+    uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/azure_open_ai_connection/listsecrets?api-version=2023-04-01-preview
+  response:
+    body:
+      string: '{"tags": null, "location": null, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/azure_open_ai_connection",
+        "name": "azure_open_ai_connection", "type": "Microsoft.MachineLearningServices/workspaces/connections",
+        "properties": {"authType": "ApiKey", "credentials": {"key": "_"}, "category":
+        "AzureOpenAI", "expiryTime": null, "target": "_", "createdByWorkspaceArmId":
+        null, "isSharedToAll": false, "sharedUserList": [], "metadata": {"azureml.flow.connection_type":
+        "AzureOpenAI", "azureml.flow.module": "promptflow.connections", "ApiType":
+        "azure", "ApiVersion": "2023-07-01-preview", "ResourceId": null}}, "systemData":
+        {"createdAt": "2023-08-22T10:15:34.5762053Z", "createdBy": "yigao@microsoft.com",
+        "createdByType": "User", "lastModifiedAt": "2023-08-22T10:15:34.5762053Z",
+        "lastModifiedBy": "yigao@microsoft.com", "lastModifiedByType": "User"}}'
+    headers:
+      cache-control:
+      - no-cache
+      content-length:
+      - '1170'
+      content-type:
+      - application/json; charset=utf-8
+      expires:
+      - '-1'
+      pragma:
+      - no-cache
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains
+      vary:
+      - Accept-Encoding
+      x-cache:
+      - CONFIG_NOCACHE
+      x-content-type-options:
+      - nosniff
+      x-request-time:
+      - '0.197'
+    status:
+      code: 200
+      message: OK
+- request:
+    body: null
+    headers:
+      Accept:
+      - '*/*'
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      Content-Length:
+      - '0'
+      User-Agent:
+      - python-requests/2.31.0
+    method: POST
+    uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/custom_connection/listsecrets?api-version=2023-04-01-preview
+  response:
+    body:
+      string: '{"tags": null, "location": null, "id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/connections/custom_connection",
+        "name": "custom_connection", "type": "Microsoft.MachineLearningServices/workspaces/connections",
+        "properties": {"authType": "CustomKeys", "credentials": {"keys": {}}, "category":
+        "CustomKeys", "expiryTime": null, "target": "_", "createdByWorkspaceArmId":
+        null, "isSharedToAll": false, "sharedUserList": [], "metadata": {"azureml.flow.connection_type":
+        "Custom", "azureml.flow.module": "promptflow.connections"}}, "systemData":
+        {"createdAt": "2023-06-19T20:56:12.0353964Z", "createdBy": "sejuare@microsoft.com",
+        "createdByType": "User", "lastModifiedAt": "2023-06-19T20:56:12.0353964Z",
+        "lastModifiedBy": "sejuare@microsoft.com", "lastModifiedByType": "User"}}'
+    headers:
+      cache-control:
+      - no-cache
+      content-length:
+      - '1251'
+      content-type:
+      - application/json; charset=utf-8
+      expires:
+      - '-1'
+      pragma:
+      - no-cache
+      strict-transport-security:
+      - max-age=31536000; includeSubDomains
+      vary:
+      - Accept-Encoding
+      x-cache:
+      - CONFIG_NOCACHE
+      x-content-type-options:
+      - nosniff
+      x-request-time:
+      - '0.259'
+    status:
+      code: 200
+      message: OK
+version: 1
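Note how every identifying value in the cassette above is normalized: the subscription ID is zeroed, the resource group and workspace are 00000, the storage account is fake_account_name, and the returned secret dGhpcyBpcyBmYWtlIGtleQ== is just base64 for "this is fake key". A sketch of the kind of vcrpy scrubbing hook that produces such output; the regex and wiring are illustrative assumptions, not the sanitizer this PR actually ships:

# Illustrative request scrubber, assuming vcrpy's before_record_request hook;
# the real sanitization behind these cassettes is not shown in this diff.
import re
import vcr

SUB_ID = re.compile(r"/subscriptions/[0-9a-fA-F-]{36}")

def scrub_request(request):
    # Rewrite URIs so recorded traffic matches the committed, anonymized cassettes.
    request.uri = SUB_ID.sub(
        "/subscriptions/00000000-0000-0000-0000-000000000000", request.uri
    )
    return request

my_vcr = vcr.VCR(
    before_record_request=scrub_request,
    filter_headers=["authorization"],  # keep bearer tokens out of recordings
)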
diff --git a/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_basic_flow_run_bulk_without_env.yaml b/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_basic_flow_run_bulk_without_env.yaml
new file mode 100644
index 00000000000..eb61884a47e
--- /dev/null
+++ b/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_basic_flow_run_bulk_without_env.yaml
@@ -0,0 +1,764 @@
+interactions:
+- request:
+    body: null
+    headers:
+      Accept:
+      - application/json
+      Accept-Encoding:
+      - gzip, deflate
+      Connection:
+      - keep-alive
+      User-Agent:
+      - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0
+        Python/3.10.13 (Windows-10-10.0.22621-SP0)
+    method: GET
+    uri: 
"serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding,Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.077' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.077' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:18:15 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '379' + content-md5: + - lI/pz9jzTQ7Td3RHPL7y7w== + content-type: + - application/octet-stream + last-modified: + - Tue, 25 Jul 2023 06:21:56 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Tue, 25 Jul 2023 06:21:56 GMT + x-ms-meta-name: + - e0068493-1fbe-451c-96b3-cf6b013632ad + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 1f73938f-def0-4a75-b4d0-6b07a2378e1b + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:18:16 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - 
Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding,Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.097' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.082' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:18:20 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: 
https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Sat, 21 Oct 2023 10:03:52 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Sat, 21 Oct 2023 10:03:51 GMT + x-ms-meta-name: + - ae2b7263-7dbe-4567-92f3-626b5c6babf7 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:18:21 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"}, + "inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName": + "demo-mir", "sessionId": "ddeba43e9ebd3dab442a5b3660706e75d2bf0dae3e2b9a61", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '740' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '13.357' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: 
https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697883514, "rootRunId": "name", "createdUtc": + "2023-10-21T10:18:34.157499+00:00", "createdBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "userId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", "token": null, "tokenExpiryTimeUtc": + null, "error": null, "warnings": null, "revision": 2, "statusRevision": 1, + "runUuid": "d6c7451b-8e90-4892-87e9-30ad30f2be2a", "parentRunUuid": null, + "rootRunUuid": "d6c7451b-8e90-4892-87e9-30ad30f2be2a", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "lastModifiedUtc": "2023-10-21T10:18:34.157499+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId": + null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "status": "Preparing", + "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": + "web_classification", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "ddeba43e9ebd3dab442a5b3660706e75d2bf0dae3e2b9a61", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/217dbb3d5adb00d936791ee0eb78382e/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl", + "azureml.promptflow.snapshot_id": "bf8537d9-ef76-476c-b1cc-302de091c8c6"}, + "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": 
"fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content__variant_1.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "256", "temperature": "0.3", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content__variant_1.jinja2", "Reduce": false, "Comment": null, + "Activate": null, "Api": "completion", "Provider": null, "Connection": "azure_open_ai_connection", + "Module": null}], "Tools": [{"Name": "Content Safety (Text Analyze)", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureContentSafetyConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], 
"enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5", "default_prompt": + null}, {"Name": "Embedding", "Type": "python", "Inputs": {"connection": {"Name": + null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8", "default_prompt": null}, {"Name": "Open Source + LLM", "Type": "custom_llm", "Inputs": {"api": {"Name": null, 
"Type": ["string"], + "Default": null, "Description": null, "Enum": ["chat", "completion"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "connection": {"Name": null, "Type": ["CustomConnection"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Serp API", "Type": "python", "Inputs": {"connection": {"Name": null, + "Type": ["SerpConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "engine": {"Name": null, "Type": ["string"], + "Default": "google", "Description": null, "Enum": ["google", "bing"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "location": {"Name": null, "Type": ["string"], "Default": "", "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, 
"dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Faiss Index Lookup", "Type": "python", "Inputs": {"path": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], "Default": + "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from the FAISS index file.", "connection_type": null, "Module": + "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector DB Lookup", + "Type": "python", "Inputs": {"class_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "collection_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["QdrantConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": 
+ "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector Index + Lookup", "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["object"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search text or vector based query from AzureML + Vector Index.", "connection_type": null, "Module": "promptflow_vectordb.tool.vector_index_lookup", + "class_name": 
"VectorIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1", "default_prompt": + null}, {"Name": "classify_with_llm.jinja2", "Type": "llm", "Inputs": {"examples": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text_content": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "url": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "classify_with_llm.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "convert_to_dict.py", "Type": "python", "Inputs": {"input_str": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "convert_to_dict.py", + "LkgCode": null, "Code": null, "Function": "convert_to_dict", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null, "default_prompt": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null, "default_prompt": null}, {"Name": "prepare_examples.py", "Type": "python", + "Inputs": null, "Outputs": null, "Description": null, "connection_type": null, + "Module": null, "class_name": null, "Source": "prepare_examples.py", "LkgCode": + null, "Code": null, "Function": "prepare_examples", "action_type": null, 
"provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "summarize_text_content.jinja2", "Type": "llm", "Inputs": + {"text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "summarize_text_content.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "summarize_text_content__variant_1.jinja2", "Type": "llm", + "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}], "Codes": null, "Inputs": {"url": {"Name": null, "Type": "string", + "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40632' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.042' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_run_file_with_set.yaml b/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_run_file_with_set.yaml new file mode 100644 index 00000000000..dc9afff0da0 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_run_file_with_set.yaml @@ -0,0 +1,688 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding,Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.088' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.099' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 25 Oct 2023 11:06:18 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - 
BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 25 Oct 2023 11:06:19 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding,Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.113' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.088' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 25 Oct 2023 11:06:23 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '245' + content-md5: + - F+JA0a3CxcLYZ0ANRdlZbA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:10 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:09 GMT + x-ms-meta-name: + - 7eb4fee6-5edc-4ab3-905c-0a3a3c41d3a3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 25 Oct 2023 11:06:24 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml", + "runId": "name", "runDisplayName": "print_env_var", "runExperimentName": "print_env_var", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl"}, + "inputsMapping": {}, "connections": {}, "environmentVariables": {"API_BASE": + "${azure_open_ai_connection.api_base}"}, "runtimeName": "demo-mir", "sessionId": + "4d338c3c435dc090371f703f527a0b27b4bd7182c3fff543", "flowLineageId": "f1efdb93dcf9b3c17e246e7bcf0e2c7398d7bc289f8dd2c3d8f808eacc63c31f", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '770' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '10.228' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1698231995, "rootRunId": "name", "createdUtc": + "2023-10-25T11:06:35.1681223+00:00", "createdBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "userId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", "token": null, "tokenExpiryTimeUtc": + null, "error": null, "warnings": null, "revision": 2, "statusRevision": 1, + "runUuid": "ba770203-7c93-41ec-8780-cb5a6630676a", "parentRunUuid": null, + "rootRunUuid": "ba770203-7c93-41ec-8780-cb5a6630676a", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "lastModifiedUtc": "2023-10-25T11:06:35.1681223+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", 
"parentRunId": + null, "experimentId": "6a87c3ae-5a75-4c5d-9eb9-5203b0062282", "status": "Preparing", + "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": + "print_env_var", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "4d338c3c435dc090371f703f527a0b27b4bd7182c3fff543", + "azureml.promptflow.flow_lineage_id": "f1efdb93dcf9b3c17e246e7bcf0e2c7398d7bc289f8dd2c3d8f808eacc63c31f", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/3360ae705933fb90bcd290241ca0ece9/print_env_var/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/24ae753309d7e36d73d1c9d7d2a03845/env_var_names.jsonl", + "azureml.promptflow.snapshot_id": "83edc27a-0ac8-43f8-b236-ab6ac84ca4d4", + "_azureml.evalutation_run": "promptflow.BatchRun"}, "parameters": {}, "actionUris": + {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], + "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "print_env", "Type": "python", "Source": {"Type": "code", + "Tool": null, "Path": "print_env.py"}, "Inputs": {"key": "${inputs.key}"}, + "Tool": "print_env.py", "Reduce": false, "Comment": null, "Activate": null, + "Api": null, "Provider": null, "Connection": null, "Module": null}], "Tools": + [{"Name": "Content Safety (Text Analyze)", "Type": "python", "Inputs": {"connection": + {"Name": null, "Type": ["AzureContentSafetyConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, 
"enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5", "default_prompt": + null}, {"Name": "Embedding", "Type": "python", "Inputs": {"connection": {"Name": + null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8", "default_prompt": null}, {"Name": "Open Source + LLM", "Type": "custom_llm", "Inputs": {"api": {"Name": null, "Type": ["string"], + "Default": null, 
"Description": null, "Enum": ["chat", "completion"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "connection": {"Name": null, "Type": ["CustomConnection"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Serp API", "Type": "python", "Inputs": {"connection": {"Name": null, + "Type": ["SerpConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "engine": {"Name": null, "Type": ["string"], + "Default": "google", "Description": null, "Enum": ["google", "bing"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "location": {"Name": null, "Type": ["string"], "Default": "", "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, 
"allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Faiss Index Lookup", "Type": "python", "Inputs": {"path": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], "Default": + "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from the FAISS index file.", "connection_type": null, "Module": + "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector DB Lookup", + "Type": "python", "Inputs": {"class_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "collection_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["QdrantConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", 
"enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector Index + Lookup", "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["object"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search text or vector based query from AzureML + Vector Index.", "connection_type": null, "Module": "promptflow_vectordb.tool.vector_index_lookup", + "class_name": "VectorIndexLookup", "Source": 
null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1", "default_prompt": + null}, {"Name": "print_env.py", "Type": "python", "Inputs": {"key": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "print_env.py", "LkgCode": + null, "Code": null, "Function": "get_env_var", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}], "Codes": null, "Inputs": {"key": {"Name": null, "Type": "string", + "Default": null, "Description": null, "is_chat_input": false, "is_chat_history": + null}}, "Outputs": {"output": {"Name": null, "Type": "string", "Description": + null, "Reference": "${print_env.output.value}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '31040' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.037' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_run_with_remote_data.yaml b/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_run_with_remote_data.yaml new file mode 100644 index 00000000000..f3b4db392b5 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_cli_with_azure_TestCliWithAzure_test_run_with_remote_data.yaml @@ -0,0 +1,1215 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/webClassification1/versions/1 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/webClassification1/versions/1", + "name": "1", "type": "Microsoft.MachineLearningServices/workspaces/00000/versions", + "properties": {"description": null, "tags": {}, "properties": {}, "isArchived": + false, "isAnonymous": false, "autoDeleteSetting": null, "dataUri": "azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/eda0bdf303d802b35c788f378fa379f6/webClassification1.jsonl", + "stage": null, "intellectualProperty": null, "dataType": "uri_file"}, "systemData": + {"createdAt": 
"2023-07-28T06:38:13.7134012+00:00", "createdBy": "Han Wang", + "createdByType": "User", "lastModifiedAt": "2023-07-28T06:38:13.7232364+00:00"}}' + headers: + cache-control: + - no-cache + content-length: + - '967' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding,Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.042' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding,Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.066' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + 
x-request-time: + - '0.156' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:19:46 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Sat, 21 Oct 2023 10:03:52 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Sat, 21 Oct 2023 10:03:51 GMT + x-ms-meta-name: + - ae2b7263-7dbe-4567-92f3-626b5c6babf7 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:19:47 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name1", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "batchDataInput": {"dataUri": "azureml:/subscriptions/96aede12-2f73-41cb-b983-6d11a904839b/resourceGroups/promptflow/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus/data/webClassification1/versions/1"}, + "inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName": + "demo-mir", "sessionId": "ddeba43e9ebd3dab442a5b3660706e75d2bf0dae3e2b9a61", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '818' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name1"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '11.616' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + 
transfer-encoding: + - chunked + vary: + - Accept-Encoding,Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.104' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.145' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:20:10 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Sat, 21 Oct 2023 10:03:52 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Sat, 21 Oct 2023 10:03:51 GMT + x-ms-meta-name: + - ae2b7263-7dbe-4567-92f3-626b5c6babf7 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Sat, 21 Oct 2023 10:20:11 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name2", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "batchDataInput": {"dataUri": "azureml:webClassification1:1"}, + "inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName": + "demo-mir", "sessionId": "ddeba43e9ebd3dab442a5b3660706e75d2bf0dae3e2b9a61", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '653' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name2"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '12.159' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name1", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697883600, "rootRunId": "name1", "createdUtc": + "2023-10-21T10:20:00.110628+00:00", "createdBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "userId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", "token": null, "tokenExpiryTimeUtc": + null, "error": null, "warnings": null, "revision": 2, "statusRevision": 1, + "runUuid": "826cf37c-81d3-42a6-815a-8d0ad2e2e9cb", "parentRunUuid": null, + "rootRunUuid": "826cf37c-81d3-42a6-815a-8d0ad2e2e9cb", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "lastModifiedUtc": "2023-10-21T10:20:00.110628+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name1", + "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", + "status": "Preparing", "startTimeUtc": 
null, "endTimeUtc": null, "scheduleId": + null, "displayName": "web_classification", "name": null, "dataContainerId": + "dcid.name1", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "ddeba43e9ebd3dab442a5b3660706e75d2bf0dae3e2b9a61", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/217dbb3d5adb00d936791ee0eb78382e/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/webClassification1/versions/1", + "azureml.promptflow.snapshot_id": "482e5adb-1209-48c4-a485-ee37703233be"}, + "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": 
"summarize_text_content__variant_1.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "256", "temperature": "0.3", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content__variant_1.jinja2", "Reduce": false, "Comment": null, + "Activate": null, "Api": "completion", "Provider": null, "Connection": "azure_open_ai_connection", + "Module": null}], "Tools": [{"Name": "Content Safety (Text Analyze)", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureContentSafetyConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5", "default_prompt": + null}, {"Name": "Embedding", "Type": "python", "Inputs": {"connection": {"Name": + null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, 
"enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8", "default_prompt": null}, {"Name": "Open Source + LLM", "Type": "custom_llm", "Inputs": {"api": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": ["chat", "completion"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "connection": {"Name": null, "Type": ["CustomConnection"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, 
"function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Serp API", "Type": "python", "Inputs": {"connection": {"Name": null, + "Type": ["SerpConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "engine": {"Name": null, "Type": ["string"], + "Default": "google", "Description": null, "Enum": ["google", "bing"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "location": {"Name": null, "Type": ["string"], "Default": "", "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Faiss Index Lookup", "Type": "python", "Inputs": {"path": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], "Default": + "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from the FAISS 
index file.", "connection_type": null, "Module": + "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector DB Lookup", + "Type": "python", "Inputs": {"class_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "collection_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["QdrantConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": 
["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector Index + Lookup", "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["object"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search text or vector based query from AzureML + Vector Index.", "connection_type": null, "Module": "promptflow_vectordb.tool.vector_index_lookup", + "class_name": "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1", "default_prompt": + null}, {"Name": "classify_with_llm.jinja2", "Type": "llm", "Inputs": {"examples": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text_content": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "url": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "classify_with_llm.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, 
"package": null, "package_version": null, "default_prompt": + null}, {"Name": "convert_to_dict.py", "Type": "python", "Inputs": {"input_str": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "convert_to_dict.py", + "LkgCode": null, "Code": null, "Function": "convert_to_dict", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null, "default_prompt": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null, "default_prompt": null}, {"Name": "prepare_examples.py", "Type": "python", + "Inputs": null, "Outputs": null, "Description": null, "connection_type": null, + "Module": null, "class_name": null, "Source": "prepare_examples.py", "LkgCode": + null, "Code": null, "Function": "prepare_examples", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "summarize_text_content.jinja2", "Type": "llm", "Inputs": + {"text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "summarize_text_content.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "summarize_text_content__variant_1.jinja2", "Type": "llm", + "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": 
null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}], "Codes": null, "Inputs": {"url": {"Name": null, "Type": "string", + "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40710' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.037' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name2", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697883624, "rootRunId": "name2", "createdUtc": + "2023-10-21T10:20:24.6189241+00:00", "createdBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "userId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", "token": null, "tokenExpiryTimeUtc": + null, "error": null, "warnings": null, "revision": 2, "statusRevision": 1, + "runUuid": "5ed93661-eb6f-44ff-92e1-03c451bbdd87", "parentRunUuid": null, + "rootRunUuid": "5ed93661-eb6f-44ff-92e1-03c451bbdd87", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "dccfa7b6-87c6-4f1e-af43-555f876e37a7", + "userPuId": "10032001B4DCFEFF", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Zhengfei Wang", "upn": + null}, "lastModifiedUtc": "2023-10-21T10:20:24.6189241+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name2", + "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", + "status": "Preparing", "startTimeUtc": null, "endTimeUtc": null, "scheduleId": + null, "displayName": "web_classification", "name": null, "dataContainerId": + "dcid.name2", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": 
{"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "ddeba43e9ebd3dab442a5b3660706e75d2bf0dae3e2b9a61", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/217dbb3d5adb00d936791ee0eb78382e/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml:webClassification1:1", "azureml.promptflow.snapshot_id": + "6edeca6a-5b87-4431-9b16-54709e17d68c"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content__variant_1.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "256", "temperature": "0.3", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content__variant_1.jinja2", "Reduce": false, "Comment": null, + "Activate": null, "Api": "completion", "Provider": null, 
"Connection": "azure_open_ai_connection", + "Module": null}], "Tools": [{"Name": "Content Safety (Text Analyze)", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureContentSafetyConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5", "default_prompt": + null}, {"Name": "Embedding", "Type": "python", "Inputs": {"connection": {"Name": + null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": 
{"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8", "default_prompt": null}, {"Name": "Open Source + LLM", "Type": "custom_llm", "Inputs": {"api": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": ["chat", "completion"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "connection": {"Name": null, "Type": ["CustomConnection"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Serp API", "Type": "python", "Inputs": {"connection": {"Name": null, + "Type": ["SerpConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, 
"is_multi_select": false}, "engine": {"Name": null, "Type": ["string"], + "Default": "google", "Description": null, "Enum": ["google", "bing"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "location": {"Name": null, "Type": ["string"], "Default": "", "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8", "default_prompt": null}, + {"Name": "Faiss Index Lookup", "Type": "python", "Inputs": {"path": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], "Default": + "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from the FAISS index file.", "connection_type": null, "Module": + "promptflow_vectordb.tool.faiss_index_lookup", "class_name": "FaissIndexLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector DB Lookup", + "Type": "python", "Inputs": {"class_name": {"Name": 
null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "collection_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["QdrantConnection"], "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": 
null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1", "default_prompt": null}, {"Name": "Vector Index + Lookup", "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["object"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search text or vector based query from AzureML + Vector Index.", "connection_type": null, "Module": "promptflow_vectordb.tool.vector_index_lookup", + "class_name": "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1", "default_prompt": + null}, {"Name": "classify_with_llm.jinja2", "Type": "llm", "Inputs": {"examples": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text_content": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "url": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "classify_with_llm.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "convert_to_dict.py", "Type": "python", "Inputs": {"input_str": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, 
"class_name": null, "Source": "convert_to_dict.py", + "LkgCode": null, "Code": null, "Function": "convert_to_dict", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null, "default_prompt": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null, "default_prompt": null}, {"Name": "prepare_examples.py", "Type": "python", + "Inputs": null, "Outputs": null, "Description": null, "connection_type": null, + "Module": null, "class_name": null, "Source": "prepare_examples.py", "LkgCode": + null, "Code": null, "Function": "prepare_examples", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "summarize_text_content.jinja2", "Type": "llm", "Inputs": + {"text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "summarize_text_content.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}, {"Name": "summarize_text_content__variant_1.jinja2", "Type": "llm", + "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null, "default_prompt": + null}], "Codes": null, "Inputs": {"url": {"Name": null, "Type": "string", + "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + 
"${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40547' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.054' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_get_connection.yaml b/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_get_connection.yaml new file mode 100644 index 00000000000..35474860fbd --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_get_connection.yaml @@ -0,0 +1,174 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/api-version=2023-06-01-preview + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.029' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?api-version=2023-04-01-preview&count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": 
+ "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.107' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Connections/azure_open_ai_connection + response: + body: + string: '{"connectionName": "azure_open_ai_connection", "connectionType": "AzureOpenAI", + "configs": {"api_base": "https://fake.openai.azure.com", "api_key": null, + "api_type": "azure", "api_version": "2023-07-01-preview", "resource_id": null}, + "owner": {"userName": "yigao@microsoft.com"}, "createdDate": "2023-08-22T10:15:34.5762053Z", + "lastModifiedDate": "2023-08-22T10:15:34.5762053Z"}' + headers: + connection: + - keep-alive + content-length: + - '366' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.345' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Connections/custom_connection + response: + body: + string: '{"connectionName": "custom_connection", "connectionType": "Custom", + "owner": {"userName": "sejuare@microsoft.com"}, "createdDate": "2023-06-19T20:56:12.0353964Z", + "lastModifiedDate": "2023-06-19T20:56:12.0353964Z"}' + headers: + connection: + - keep-alive + content-length: + - '204' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '0.336' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_list_connection_spec.yaml 
b/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_list_connection_spec.yaml new file mode 100644 index 00000000000..1330620f4fd --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_connection_operations_TestConnectionOperations_test_list_connection_spec.yaml @@ -0,0 +1,179 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/api-version=2023-06-01-preview + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.019' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?api-version=2023-04-01-preview&count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + 
x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.063' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/Connections/specs + response: + body: + string: '[{"connectionCategory": 15, "flowValueType": "AzureContentSafetyConnection", + "connectionType": "AzureContentSafety", "connectionTypeDisplayName": "Azure + content safety", "configSpecs": [{"name": "api_key", "displayName": "API key", + "configValueType": "Secret", "isOptional": false}, {"name": "endpoint", "displayName": + "Endpoint", "configValueType": "String", "isOptional": false}, {"name": "api_version", + "displayName": "API version", "configValueType": "String", "defaultValue": + "2023-04-30-preview", "isOptional": false}, {"name": "api_type", "displayName": + "API type", "configValueType": "String", "defaultValue": "Content Safety", + "isOptional": false}], "module": "promptflow.connections"}, {"connectionCategory": + 13, "flowValueType": "AzureOpenAIConnection", "connectionType": "AzureOpenAI", + "connectionTypeDisplayName": "Azure OpenAI", "configSpecs": [{"name": "api_key", + "displayName": "API key", "configValueType": "Secret", "isOptional": false}, + {"name": "api_base", "displayName": "API base", "configValueType": "String", + "isOptional": false}, {"name": "api_type", "displayName": "API type", "configValueType": + "String", "defaultValue": "azure", "isOptional": false}, {"name": "api_version", + "displayName": "API version", "configValueType": "String", "defaultValue": + "2023-07-01-preview", "isOptional": false}, {"name": "resource_id", "displayName": + "Resource id", "configValueType": "String", "isOptional": false}], "module": + "promptflow.connections"}, {"connectionCategory": 14, "flowValueType": "CognitiveSearchConnection", + "connectionType": "CognitiveSearch", "connectionTypeDisplayName": "Cognitive + search", "configSpecs": [{"name": "api_key", "displayName": "API key", "configValueType": + "Secret", "isOptional": false}, {"name": "api_base", "displayName": "API base", + "configValueType": "String", "isOptional": false}, {"name": "api_version", + "displayName": "API version", "configValueType": "String", "defaultValue": + "2023-07-01-Preview", "isOptional": false}], "module": "promptflow.connections"}, + {"connectionCategory": 16, "flowValueType": "CustomConnection", "connectionType": + "Custom", "connectionTypeDisplayName": "Custom", "module": "promptflow.connections"}, + {"connectionCategory": 16, "flowValueType": "OpenAIConnection", "connectionType": + "OpenAI", "connectionTypeDisplayName": "OpenAI", "configSpecs": [{"name": + "api_key", "displayName": "API key", "configValueType": "Secret", "isOptional": + false}, {"name": "organization", "displayName": "Organization", "configValueType": + "String", "isOptional": true}], "module": "promptflow.connections"}, {"connectionCategory": + 16, "flowValueType": "QdrantConnection", "connectionType": "Qdrant", "connectionTypeDisplayName": + "Qdrant", "configSpecs": [{"name": "api_key", "displayName": "API key", "configValueType": + "Secret", "isOptional": false}, {"name": 
"api_base", "displayName": "API base", + "configValueType": "String", "isOptional": false}], "module": "promptflow_vectordb.connections.qdrant"}, + {"connectionCategory": 16, "flowValueType": "SerpConnection", "connectionType": + "Serp", "connectionTypeDisplayName": "Serp", "configSpecs": [{"name": "api_key", + "displayName": "API key", "configValueType": "Secret", "isOptional": false}], + "module": "promptflow.connections"}, {"connectionCategory": 16, "flowValueType": + "WeaviateConnection", "connectionType": "Weaviate", "connectionTypeDisplayName": + "Weaviate", "configSpecs": [{"name": "api_key", "displayName": "API key", + "configValueType": "Secret", "isOptional": false}, {"name": "api_base", "displayName": + "API base", "configValueType": "String", "isOptional": false}], "module": + "promptflow_vectordb.connections.weaviate"}]' + headers: + connection: + - keep-alive + content-length: + - '3402' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.769' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime.yaml new file mode 100644 index 00000000000..b71c00936dc --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime.yaml @@ -0,0 +1,806 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.057' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.072' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.084' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - 
gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.263' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:39:50 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:39:51 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.146' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.116' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:39:54 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + 
content-length: + - '245' + content-md5: + - F+JA0a3CxcLYZ0ANRdlZbA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:10 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:09 GMT + x-ms-meta-name: + - 7eb4fee6-5edc-4ab3-905c-0a3a3c41d3a3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:39:55 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.148' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 
azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.108' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:40:00 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:40:01 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
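+# Sanitization note: subscription, resource group, and workspace identifiers
+# are zeroed placeholders, the storage account and container are
+# fake_account_name / fake-container-name, and the recorded AccountKey
+# "dGhpcyBpcyBmYWtlIGtleQ==" is simply base64 for "this is fake key".
+# Recorded header values such as content-length appear to predate scrubbing,
+# which is why '134' is reported for the 64-byte fake-key body.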
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.096' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.097' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:40:04 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + 
content-length: + - '245' + content-md5: + - F+JA0a3CxcLYZ0ANRdlZbA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:10 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:09 GMT + x-ms-meta-name: + - 7eb4fee6-5edc-4ab3-905c-0a3a3c41d3a3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:40:05 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime_with_environment.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime_with_environment.yaml new file mode 100644 index 00000000000..c20b021feaf --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_automatic_runtime_with_environment.yaml @@ -0,0 +1,452 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.066' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.082' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; 
includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.082' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.129' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:41:38 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:41:39 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
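+# test_automatic_runtime_with_environment re-runs the datastore/listSecrets/
+# HEAD sequence once per artifact (env_var_names.jsonl above,
+# flow_with_environment/flow.dag.yaml below); between repetitions essentially
+# only the blob path, timestamps, and per-blob metadata change.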
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.083' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.159' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:41:42 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/flow_with_environment/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + 
content-length: + - '328' + content-md5: + - MHHZXt2jTItYYIrTu2pTZA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:31:00 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:31:00 GMT + x-ms-meta-name: + - 5dc7e741-5a43-42d2-b690-f06c0463c695 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:41:43 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/flow_with_environment/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_basic_evaluation.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_basic_evaluation.yaml new file mode 100644 index 00000000000..d81c6ab1d4f --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_basic_evaluation.yaml @@ -0,0 +1,2581 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.027' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.076' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; 
includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.132' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.108' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:17:01 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '379' + content-md5: + - lI/pz9jzTQ7Td3RHPL7y7w== + content-type: + - application/octet-stream + last-modified: + - Tue, 25 Jul 2023 06:21:56 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Tue, 25 Jul 2023 06:21:56 GMT + x-ms-meta-name: + - e0068493-1fbe-451c-96b3-cf6b013632ad + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 1f73938f-def0-4a75-b4d0-6b07a2378e1b + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:17:02 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
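+# test_basic_evaluation: once the input data (webClassification3.jsonl) and
+# the flow snapshot files are verified in blob storage, the batch run is
+# submitted below via POST .../BulkRuns/submit; the response body is just the
+# scrubbed run ID ("batch_run_name").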
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.123' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.119' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:17:05 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - 
bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Mon, 28 Aug 2023 14:22:57 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Mon, 28 Aug 2023 14:22:57 GMT + x-ms-meta-name: + - f8d42f9b-ad14-4f6d-ad92-08c1b6de1b0d + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:17:06 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "batch_run_name", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "nodeVariant": "${summarize_text_content.variant_0}", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"}, + "inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables": + {}, "runtimeName": "demo-mir", "sessionId": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '814' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"batch_run_name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '25.885' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + 
body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.139' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.082' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:18:15 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '379' + content-md5: + - lI/pz9jzTQ7Td3RHPL7y7w== + content-type: + - application/octet-stream + last-modified: + - Tue, 25 Jul 2023 06:21:56 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Tue, 25 Jul 2023 06:21:56 GMT + x-ms-meta-name: + - e0068493-1fbe-451c-96b3-cf6b013632ad + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 1f73938f-def0-4a75-b4d0-6b07a2378e1b + x-ms-version: + - 
'2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:18:16 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.112' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; 
charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.127' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:18:20 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/classification_accuracy_evaluation/calculate_accuracy.py + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '694' + content-md5: + - Ws7CN2arUzhpkNJiLbo0Tw== + content-type: + - application/octet-stream + last-modified: + - Fri, 01 Sep 2023 11:19:26 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 01 Sep 2023 11:19:25 GMT + x-ms-meta-name: + - fe073649-9a4d-47ca-802e-b9069cf8f76c + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:18:21 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/classification_accuracy_evaluation/calculate_accuracy.py + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
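+# The evaluation submission below references the batch run through
+# "variantRunId": "batch_run_name", mapping its output into the evaluator via
+# "${run.outputs.category}" while "groundtruth" ("${data.answer}") comes from
+# the shared webClassification3.jsonl input.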
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/classification_accuracy_evaluation/flow.dag.yaml", + "runId": "eval_run_name", "runDisplayName": "classification_accuracy_evaluation", + "runExperimentName": "classification_accuracy_evaluation", "variantRunId": "batch_run_name", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"}, + "inputsMapping": {"groundtruth": "${data.answer}", "prediction": "${run.outputs.category}"}, + "connections": {}, "environmentVariables": {}, "runtimeName": "demo-mir", "sessionId": + "0cdfb546132d26ab9ea3d7cee6381399ead76bbe50c756a0", "flowLineageId": "d33106ee90a69ef62de39d2caddf8806908c9b83e611716825cadfc7d2cb4174", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '916' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"eval_run_name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '9.892' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + 
- no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.104' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.146' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:18:59 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/classification_accuracy_evaluation/calculate_accuracy.py + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '694' + content-md5: + - Ws7CN2arUzhpkNJiLbo0Tw== + content-type: + - application/octet-stream + last-modified: + - Fri, 01 Sep 2023 11:19:26 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 01 Sep 2023 11:19:25 GMT + x-ms-meta-name: + - fe073649-9a4d-47ca-802e-b9069cf8f76c + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:19:01 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/classification_accuracy_evaluation/calculate_accuracy.py + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
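+# The second evaluation submission below (eval_run_name_1) sends an empty
+# "batchDataInput" and maps "groundtruth" from "${run.inputs.url}"; it is
+# driven entirely by the referenced batch run rather than by a separate data
+# file.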
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/classification_accuracy_evaluation/flow.dag.yaml", + "runId": "eval_run_name_1", "runDisplayName": "classification_accuracy_evaluation", + "runExperimentName": "classification_accuracy_evaluation", "variantRunId": "batch_run_name", + "batchDataInput": {}, "inputsMapping": {"groundtruth": "${run.inputs.url}", + "prediction": "${run.outputs.category}"}, "connections": {}, "environmentVariables": + {}, "runtimeName": "demo-mir", "sessionId": "0cdfb546132d26ab9ea3d7cee6381399ead76bbe50c756a0", + "flowLineageId": "d33106ee90a69ef62de39d2caddf8806908c9b83e611716825cadfc7d2cb4174", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '791' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"eval_run_name_1"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '10.639' + status: + code: 200 + message: OK +- request: + body: '{"runId": "batch_run_name", "selectRunMetadata": true, "selectRunDefinition": + true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697617053, "rootRunId": "batch_run_name", + "createdUtc": "2023-10-18T08:17:33.6185504+00:00", "createdBy": {"userObjectId": + "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 5, + "statusRevision": 3, "runUuid": "7621e9c0-87c9-4c06-b1d3-a5c60a9d01fd", "parentRunUuid": + null, "rootRunUuid": "7621e9c0-87c9-4c06-b1d3-a5c60a9d01fd", "lastStartTimeUtc": + null, "currentComputeTime": null, "computeDuration": "00:00:19.8604351", "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "74013e41-d17e-462a-8db6-5c0e26c0368c", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": 
"ec6824af-81f6-47fa-a07e-5a04ff0b94e7", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:18:04.4562042+00:00", "duration": + "00:00:19.8604351", "cancelationReason": null, "currentAttemptId": 1, "runId": + "batch_run_name", "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", + "status": "Completed", "startTimeUtc": "2023-10-18T08:17:45.6765161+00:00", + "endTimeUtc": "2023-10-18T08:18:05.5369512+00:00", "scheduleId": null, "displayName": + "web_classification", "name": null, "dataContainerId": "dcid.batch_run_name", + "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/623c2a5b51c1eb9639ec4374ee09eaaa/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "ffd97207-e85e-4c1d-a502-230a527ab0db", "azureml.promptflow.total_tokens": + "2448"}, "parameters": {}, "actionUris": {}, "scriptName": null, "target": + null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings": {}, "services": + {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": + null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": + null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId": + "azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1", + "type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1", + "type": "UriFolder"}}}, "runDefinition": {"Nodes": [{"Name": "fetch_text_content_from_url", + "Type": "python", "Source": {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + 
"0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}], "Tools": [{"Name": "Content Safety (Text + Analyze)", "Type": "python", "Inputs": {"connection": {"Name": null, "Type": + ["AzureContentSafetyConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "hate_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "sexual_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "violence_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, 
"model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure + Content Safety to detect harmful content.", "connection_type": null, "Module": + "content_safety_text.tools.content_safety_text_tool", "class_name": null, + "Source": null, "LkgCode": null, "Code": null, "Function": "analyze_text", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-contentsafety", + "package_version": "0.0.5"}, {"Name": "Embedding", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", + "Inputs": {"api": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["chat", "completion"], "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "connection": {"Name": null, "Type": ["CustomConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, 
"Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": 
null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, 
"Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "classify_with_llm.jinja2", + "Type": "llm", "Inputs": {"examples": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "text_content": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, 
"allow_manual_entry": + false, "is_multi_select": false}, "url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "classify_with_llm.jinja2", "LkgCode": null, + "Code": null, "Function": null, "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + false, "package": null, "package_version": null}, {"Name": "convert_to_dict.py", + "Type": "python", "Inputs": {"input_str": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "convert_to_dict.py", "LkgCode": null, "Code": + null, "Function": "convert_to_dict", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "prepare_examples.py", "Type": "python", "Inputs": null, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "prepare_examples.py", "LkgCode": null, "Code": null, "Function": + "prepare_examples", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}, {"Name": "summarize_text_content.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content.jinja2", "LkgCode": + null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "summarize_text_content__variant_1.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, 
"Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}], "Codes": + null, "Inputs": {"url": {"Name": null, "Type": "string", "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '41131' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.050' + status: + code: 200 + message: OK +- request: + body: '{"runId": "eval_run_name", "selectRunMetadata": true, "selectRunDefinition": + true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697617112, "rootRunId": "eval_run_name", + "createdUtc": "2023-10-18T08:18:32.1754378+00:00", "createdBy": {"userObjectId": + "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 5, + "statusRevision": 3, "runUuid": "beabc0d1-847f-4c56-9b14-221caa887d53", "parentRunUuid": + null, "rootRunUuid": "beabc0d1-847f-4c56-9b14-221caa887d53", "lastStartTimeUtc": + null, "currentComputeTime": null, "computeDuration": "00:00:04.5523185", "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "74013e41-d17e-462a-8db6-5c0e26c0368c", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", 
"userName": "ec6824af-81f6-47fa-a07e-5a04ff0b94e7", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:18:48.338951+00:00", "duration": + "00:00:04.5523185", "cancelationReason": null, "currentAttemptId": 1, "runId": + "eval_run_name", "parentRunId": null, "experimentId": "dd66dff5-aa1a-4674-83c3-0c347f3ad863", + "status": "Completed", "startTimeUtc": "2023-10-18T08:18:44.9504712+00:00", + "endTimeUtc": "2023-10-18T08:18:49.5027897+00:00", "scheduleId": null, "displayName": + "classification_accuracy_evaluation", "name": null, "dataContainerId": "dcid.eval_run_name", + "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "0cdfb546132d26ab9ea3d7cee6381399ead76bbe50c756a0", + "azureml.promptflow.flow_lineage_id": "d33106ee90a69ef62de39d2caddf8806908c9b83e611716825cadfc7d2cb4174", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/4d433a29e909bee29cbc6f508ba230ee/classification_accuracy_evaluation/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl", + "azureml.promptflow.input_run_id": "batch_run_name", "azureml.promptflow.inputs_mapping": + "{\"groundtruth\":\"${data.answer}\",\"prediction\":\"${run.outputs.category}\"}", + "azureml.promptflow.snapshot_id": "0339df11-0eba-49c2-8aef-12b177d75c22", + "azureml.promptflow.total_tokens": "0"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId": + "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1", + "type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1", + "type": "UriFolder"}}}, "runDefinition": {"Nodes": [{"Name": "grade", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "grade.py"}, "Inputs": + {"groundtruth": "${inputs.groundtruth}", "prediction": "${inputs.prediction}"}, + "Tool": "grade.py", "Reduce": false, "Comment": null, "Activate": null, "Api": + null, "Provider": null, "Connection": null, "Module": null}, {"Name": "calculate_accuracy", + "Type": "python", "Source": {"Type": "code", "Tool": null, "Path": "calculate_accuracy.py"}, + "Inputs": {"grades": "${grade.output}", "variant_ids": "${inputs.variant_id}"}, + "Tool": "calculate_accuracy.py", "Reduce": true, "Comment": null, "Activate": + null, "Api": null, "Provider": null, "Connection": null, "Module": null}], + "Tools": [{"Name": "Content Safety (Text Analyze)", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureContentSafetyConnection"], "Default": + null, "Description": 
null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5"}, {"Name": "Embedding", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", + "OpenAIConnection"], "Default": null, "Description": null, "Enum": null, "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "deployment_name": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["AzureOpenAIConnection"], "enabled_by_value": null, "model_list": ["text-embedding-ada-002", + "text-search-ada-doc-001", "text-search-ada-query-001"], "Capabilities": {"completion": + false, "chat_completion": false, "embeddings": true}, "dynamic_list": null, + "allow_manual_entry": false, "is_multi_select": false}, "input": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, 
"enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "model": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": + ["OpenAIConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Open AI''s embedding model to + create an embedding vector representing the input text.", "connection_type": + null, "Module": "promptflow.tools.embedding", "class_name": null, "Source": + null, "LkgCode": null, "Code": null, "Function": "embedding", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", "package_version": + "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", "Inputs": {"api": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + ["chat", "completion"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CustomConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, 
"is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], 
"Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": 
["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "calculate_accuracy.py", + "Type": "python", "Inputs": {"grades": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "variant_ids": {"Name": null, "Type": ["object"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "calculate_accuracy.py", + "LkgCode": null, "Code": null, "Function": "calculate_accuracy", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "grade.py", "Type": "python", "Inputs": {"groundtruth": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "prediction": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "grade.py", "LkgCode": null, "Code": null, "Function": + "grade", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}], "Codes": null, "Inputs": {"variant_id": {"Name": + null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false, 
"is_chat_history": null}, "groundtruth": {"Name": null, "Type": "string", + "Default": null, "Description": "Please specify the groundtruth column, which + contains the true label to the outputs that your flow produces.", "is_chat_input": + false, "is_chat_history": null}, "prediction": {"Name": null, "Type": "string", + "Default": null, "Description": "Please specify the prediction column, which + contains the predicted outputs that your flow produces.", "is_chat_input": + false, "is_chat_history": null}}, "Outputs": {"grade": {"Name": null, "Type": + "string", "Description": null, "Reference": "${grade.output}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '35035' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.053' + status: + code: 200 + message: OK +- request: + body: '{"runId": "eval_run_name_1", "selectRunMetadata": true, "selectRunDefinition": + true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697617152, "rootRunId": "eval_run_name_1", + "createdUtc": "2023-10-18T08:19:12.0592711+00:00", "createdBy": {"userObjectId": + "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": {"error": {"code": "UserError", "severity": + null, "message": "The input for batch run is incorrect. Couldn''t find these + mapping relations: ${data.variant_id}. Please make sure your input mapping + keys and values match your YAML input section and input data. For more information, + refer to the following documentation: https://microsoft.github.io/promptflow/how-to-guides/column-mapping.html.", + "messageFormat": "The input for batch run is incorrect. Couldn''t find these + mapping relations: {invalid_relations}. Please make sure your input mapping + keys and values match your YAML input section and input data. For more information, + refer to the following documentation: https://microsoft.github.io/promptflow/how-to-guides/column-mapping.html.", + "messageParameters": {"invalid_relations": "${data.variant_id}"}, "referenceCode": + "Executor", "detailsUri": null, "target": null, "details": [], "innerError": + {"code": "ValidationError", "innerError": {"code": "InputMappingError", "innerError": + null}}, "debugInfo": {"type": "InputMappingError", "message": "The input for + batch run is incorrect. Couldn''t find these mapping relations: ${data.variant_id}. 
+ Please make sure your input mapping keys and values match your YAML input + section and input data. For more information, refer to the following documentation: + https://microsoft.github.io/promptflow/how-to-guides/column-mapping.html.", + "stackTrace": "Traceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/runtime/runtime.py\", + line 501, in execute_bulk_run_request\n resolved_inputs = flow_executor.validate_and_apply_inputs_mapping(input_dicts, + request.inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 824, in validate_and_apply_inputs_mapping\n resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs, + inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 1195, in _apply_inputs_mapping_for_all_lines\n result = [FlowExecutor.apply_inputs_mapping(item, + inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 1195, in \n result = [FlowExecutor.apply_inputs_mapping(item, + inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 1065, in apply_inputs_mapping\n raise InputMappingError(\n", "innerException": + null, "data": null, "errorResponse": null}, "additionalInfo": null}, "correlation": + null, "environment": null, "location": null, "time": "2023-10-18T08:19:26.964687+00:00", + "componentName": "promptflow-runtime/20231011.v2 Designer/1.0 promptflow-sdk/0.0.1 + azsdk-python-azuremachinelearningdesignerserviceclient/unknown Python/3.10.13 + (Windows-10-10.0.22621-SP0) promptflow/0.1.0b8.dev2"}, "warnings": null, "revision": + 6, "statusRevision": 3, "runUuid": "958e943a-04e0-41cd-96e7-62e4fdd24750", + "parentRunUuid": null, "rootRunUuid": "958e943a-04e0-41cd-96e7-62e4fdd24750", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:01.7699375", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "74013e41-d17e-462a-8db6-5c0e26c0368c", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "ec6824af-81f6-47fa-a07e-5a04ff0b94e7", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:19:26.5236285+00:00", "duration": + "00:00:01.7699375", "cancelationReason": null, "currentAttemptId": 1, "runId": + "eval_run_name_1", "parentRunId": null, "experimentId": "dd66dff5-aa1a-4674-83c3-0c347f3ad863", + "status": "Failed", "startTimeUtc": "2023-10-18T08:19:25.4115752+00:00", "endTimeUtc": + "2023-10-18T08:19:27.1815127+00:00", "scheduleId": null, "displayName": "classification_accuracy_evaluation", + "name": null, "dataContainerId": "dcid.eval_run_name_1", "description": null, + "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": + null, "traits": [], "attribution": "PromptFlow", "computeType": "MIR_v2"}, + "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "0cdfb546132d26ab9ea3d7cee6381399ead76bbe50c756a0", + 
"azureml.promptflow.flow_lineage_id": "d33106ee90a69ef62de39d2caddf8806908c9b83e611716825cadfc7d2cb4174", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/4d433a29e909bee29cbc6f508ba230ee/classification_accuracy_evaluation/flow.dag.yaml", + "azureml.promptflow.input_run_id": "batch_run_name", "azureml.promptflow.inputs_mapping": + "{\"groundtruth\":\"${run.inputs.url}\",\"prediction\":\"${run.outputs.category}\"}", + "azureml.promptflow.snapshot_id": "649ff0ec-49e7-41a6-83db-d66bd01b128b", + "azureml.promptflow.total_tokens": "0"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId": + "azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_1_output_data_debug_info/versions/1", + "type": "UriFolder"}}}, "runDefinition": {"Nodes": [{"Name": "grade", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "grade.py"}, "Inputs": + {"groundtruth": "${inputs.groundtruth}", "prediction": "${inputs.prediction}"}, + "Tool": "grade.py", "Reduce": false, "Comment": null, "Activate": null, "Api": + null, "Provider": null, "Connection": null, "Module": null}, {"Name": "calculate_accuracy", + "Type": "python", "Source": {"Type": "code", "Tool": null, "Path": "calculate_accuracy.py"}, + "Inputs": {"grades": "${grade.output}", "variant_ids": "${inputs.variant_id}"}, + "Tool": "calculate_accuracy.py", "Reduce": true, "Comment": null, "Activate": + null, "Api": null, "Provider": null, "Connection": null, "Module": null}], + "Tools": [{"Name": "Content Safety (Text Analyze)", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureContentSafetyConnection"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, 
"allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5"}, {"Name": "Embedding", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", + "OpenAIConnection"], "Default": null, "Description": null, "Enum": null, "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "deployment_name": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["AzureOpenAIConnection"], "enabled_by_value": null, "model_list": ["text-embedding-ada-002", + "text-search-ada-doc-001", "text-search-ada-query-001"], "Capabilities": {"completion": + false, "chat_completion": false, "embeddings": true}, "dynamic_list": null, + "allow_manual_entry": false, "is_multi_select": false}, "input": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "model": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": + ["OpenAIConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Open AI''s embedding model to + create an embedding vector representing the input text.", "connection_type": + null, "Module": "promptflow.tools.embedding", "class_name": null, "Source": + null, "LkgCode": null, "Code": null, "Function": "embedding", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", "package_version": + "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", "Inputs": {"api": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + ["chat", "completion"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, 
"Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CustomConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": 
null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + 
{"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, 
{"Name": "calculate_accuracy.py", + "Type": "python", "Inputs": {"grades": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "variant_ids": {"Name": null, "Type": ["object"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "calculate_accuracy.py", + "LkgCode": null, "Code": null, "Function": "calculate_accuracy", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "grade.py", "Type": "python", "Inputs": {"groundtruth": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "prediction": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "grade.py", "LkgCode": null, "Code": null, "Function": + "grade", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}], "Codes": null, "Inputs": {"variant_id": {"Name": + null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false, "is_chat_history": null}, "groundtruth": {"Name": null, "Type": "string", + "Default": null, "Description": "Please specify the groundtruth column, which + contains the true label to the outputs that your flow produces.", "is_chat_input": + false, "is_chat_history": null}, "prediction": {"Name": null, "Type": "string", + "Default": null, "Description": "Please specify the prediction column, which + contains the predicted outputs that your flow produces.", "is_chat_input": + false, "is_chat_history": null}}, "Outputs": {"grade": {"Name": null, "Type": + "string", "Description": null, "Reference": "${grade.output}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '37940' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.045' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 
azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/batch_run_name/logContent + response: + body: + string: '"2023-10-18 08:17:36 +0000 12335 promptflow-runtime INFO [batch_run_name] + Receiving v2 bulk run request 91d1724b-041a-44cd-8eb3-60e52f4f7255: {customer_content}\n2023-10-18 + 08:17:36 +0000 12335 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:17:36 +0000 12335 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:17:36 +0000 12335 promptflow-runtime + INFO Running , + 3 more tries to go.\n2023-10-18 08:17:36 +0000 12335 promptflow-runtime + INFO Updating batch_run_name to Status.Preparing...\n2023-10-18 08:17:36 + +0000 12335 promptflow-runtime INFO Starting to check process 31952 + status for run batch_run_name\n2023-10-18 08:17:36 +0000 12335 promptflow-runtime + INFO Start checking run status for bulk run batch_run_name\n2023-10-18 + 08:17:36 +0000 12335 promptflow-runtime INFO Start checking run status + for run batch_run_name\n2023-10-18 08:17:36 +0000 31952 promptflow-runtime + INFO [12335--31952] Start processing flowV2......\n2023-10-18 08:17:36 + +0000 31952 promptflow-runtime INFO Setting mlflow tracking uri...\n2023-10-18 + 08:17:37 +0000 31952 promptflow-runtime INFO Validating ''AzureML Data + Scientist'' user authentication...\n2023-10-18 08:17:37 +0000 31952 promptflow-runtime + INFO Running , + 5 more tries to go.\n2023-10-18 08:17:37 +0000 31952 promptflow-runtime + INFO Successfully validated ''AzureML Data Scientist'' user authentication.\n2023-10-18 + 08:17:37 +0000 31952 promptflow-runtime INFO Using AzureMLRunStorageV2\n2023-10-18 + 08:17:37 +0000 31952 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 08:17:37 +0000 31952 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 08:17:37 +0000 31952 + promptflow-runtime INFO Initialized blob service client for AzureMLRunTracker.\n2023-10-18 + 08:17:38 +0000 31952 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 08:17:38 +0000 31952 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 08:17:38 +0000 31952 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:17:38 +0000 31952 + promptflow-runtime INFO Get snapshot sas url for ffd97207-e85e-4c1d-a502-230a527ab0db...\n2023-10-18 + 08:17:44 +0000 31952 promptflow-runtime INFO Downloading snapshot ffd97207-e85e-4c1d-a502-230a527ab0db + from uri {customer_content}...\n2023-10-18 08:17:44 +0000 31952 promptflow-runtime + INFO Downloaded file /service/app/42153/requests/batch_run_name/ffd97207-e85e-4c1d-a502-230a527ab0db.zip + with size 4651 for snapshot ffd97207-e85e-4c1d-a502-230a527ab0db.\n2023-10-18 + 08:17:44 +0000 31952 promptflow-runtime INFO Download snapshot ffd97207-e85e-4c1d-a502-230a527ab0db + completed.\n2023-10-18 08:17:44 +0000 31952 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 
08:17:45 +0000 31952 promptflow-runtime INFO Resolve data from url finished + in 0.5276875569252297 seconds\n2023-10-18 08:17:45 +0000 31952 promptflow-runtime + INFO Flow run is not terminated, skip persisting flow run record.\n2023-10-18 + 08:17:45 +0000 31952 promptflow-runtime INFO Starting the aml run ''batch_run_name''...\n2023-10-18 + 08:17:45 +0000 31952 execution.bulk INFO Using fork, process count: + 3\n2023-10-18 08:17:45 +0000 32039 execution INFO Process 32039 + started.\n2023-10-18 08:17:45 +0000 32047 execution INFO Process + 32047 started.\n2023-10-18 08:17:45 +0000 32043 execution INFO Process + 32043 started.\n2023-10-18 08:17:46 +0000 32043 execution INFO Start + to run 5 nodes with concurrency level 2.\n2023-10-18 08:17:46 +0000 31952 + execution INFO Process name: Process-52:3, Process id: 32039, + Line number: 0 start execution.\n2023-10-18 08:17:46 +0000 32039 execution INFO Start + to run 5 nodes with concurrency level 2.\n2023-10-18 08:17:46 +0000 32047 + execution INFO Start to run 5 nodes with concurrency level 2.\n2023-10-18 + 08:17:46 +0000 31952 execution INFO Process name: Process-52:4, + Process id: 32047, Line number: 1 start execution.\n2023-10-18 08:17:46 +0000 31952 + promptflow-runtime INFO Flow run is not terminated, skip persisting flow + run record.\n2023-10-18 08:17:46 +0000 31952 execution INFO Process + name: Process-52:2, Process id: 32043, Line number: 2 start execution.\n2023-10-18 + 08:17:46 +0000 31952 promptflow-runtime INFO Flow run is not terminated, + skip persisting flow run record.\n2023-10-18 08:17:46 +0000 31952 promptflow-runtime + INFO Flow run is not terminated, skip persisting flow run record.\n2023-10-18 + 08:17:49 +0000 31952 execution INFO Process name: Process-52:3, + Process id: 32039, Line number: 0 completed.\n2023-10-18 08:17:49 +0000 31952 + execution.bulk INFO Finished 1 / 3 lines.\n2023-10-18 08:17:49 +0000 31952 + execution.bulk INFO Average execution time for completed lines: 3.95 + seconds. Estimated time for incomplete lines: 7.9 seconds.\n2023-10-18 08:17:54 + +0000 31952 execution INFO Process name: Process-52:2, Process + id: 32043, Line number: 2 completed.\n2023-10-18 08:17:54 +0000 31952 execution.bulk INFO Finished + 2 / 3 lines.\n2023-10-18 08:17:54 +0000 31952 execution.bulk INFO Average + execution time for completed lines: 4.45 seconds. Estimated time for incomplete + lines: 4.45 seconds.\n2023-10-18 08:17:56 +0000 12335 promptflow-runtime + INFO Running , 3 + more tries to go.\n2023-10-18 08:17:56 +0000 12335 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 08:17:57 +0000 12335 promptflow-runtime INFO Run batch_run_name is in + progress, Execution status: Running\n2023-10-18 08:18:00 +0000 31952 execution INFO Process + name: Process-52:4, Process id: 32047, Line number: 1 completed.\n2023-10-18 + 08:18:00 +0000 31952 execution.bulk INFO Finished 3 / 3 lines.\n2023-10-18 + 08:18:00 +0000 31952 execution.bulk INFO Average execution time + for completed lines: 4.95 seconds. 
Estimated time for incomplete lines: 0.0 + seconds.\n2023-10-18 08:18:04 +0000 31952 execution.bulk INFO Upload + status summary metrics for run batch_run_name finished in 2.5700535539072007 + seconds\n2023-10-18 08:18:04 +0000 31952 promptflow-runtime INFO Successfully + write run properties {\"azureml.promptflow.total_tokens\": 2448} with run + id ''batch_run_name''\n2023-10-18 08:18:04 +0000 31952 execution.bulk INFO Upload + RH properties for run batch_run_name finished in 0.060961320996284485 seconds\n2023-10-18 + 08:18:04 +0000 31952 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:04 +0000 31952 + promptflow-runtime INFO Creating unregistered output Asset for Run batch_run_name...\n2023-10-18 + 08:18:04 +0000 31952 promptflow-runtime INFO Created debug_info Asset: + azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_debug_info/versions/1\n2023-10-18 + 08:18:04 +0000 31952 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:04 +0000 31952 + promptflow-runtime INFO Creating unregistered output Asset for Run batch_run_name...\n2023-10-18 + 08:18:05 +0000 31952 promptflow-runtime INFO Created flow_outputs output + Asset: azureml://locations/eastus/workspaces/00000/data/azureml_batch_run_name_output_data_flow_outputs/versions/1\n2023-10-18 + 08:18:05 +0000 31952 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:05 +0000 31952 + promptflow-runtime INFO Creating Artifact for Run batch_run_name...\n2023-10-18 + 08:18:05 +0000 31952 promptflow-runtime INFO Created instance_results.jsonl + Artifact.\n2023-10-18 08:18:05 +0000 31952 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 08:18:05 +0000 31952 promptflow-runtime INFO Patching batch_run_name...\n2023-10-18 + 08:18:05 +0000 31952 promptflow-runtime INFO Ending the aml run ''batch_run_name'' + with status ''Completed''...\n2023-10-18 08:18:08 +0000 12335 promptflow-runtime + INFO Process 31952 finished\n2023-10-18 08:18:08 +0000 12335 promptflow-runtime + INFO [12335] Child process finished!\n2023-10-18 08:18:08 +0000 12335 + promptflow-runtime INFO [batch_run_name] End processing bulk run\n"' + headers: + connection: + - keep-alive + content-length: + - '10069' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.524' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name/logContent + response: + body: + string: '"2023-10-18 08:18:35 +0000 1095 promptflow-runtime INFO [eval_run_name] + Receiving v2 bulk run request d52c95c7-fcc5-4d4f-bf0b-c3e317907657: {customer_content}\n2023-10-18 + 08:18:35 +0000 1095 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:35 +0000 1095 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:35 +0000 1095 promptflow-runtime + INFO Running , + 3 more 
tries to go.\n2023-10-18 08:18:35 +0000 1095 promptflow-runtime + INFO Updating eval_run_name to Status.Preparing...\n2023-10-18 08:18:35 + +0000 1095 promptflow-runtime INFO Starting to check process 32716 + status for run eval_run_name\n2023-10-18 08:18:35 +0000 1095 promptflow-runtime + INFO Start checking run status for bulk run eval_run_name\n2023-10-18 + 08:18:35 +0000 1095 promptflow-runtime INFO Start checking run status + for run eval_run_name\n2023-10-18 08:18:35 +0000 32716 promptflow-runtime + INFO [1095--32716] Start processing flowV2......\n2023-10-18 08:18:35 + +0000 32716 promptflow-runtime INFO Setting mlflow tracking uri...\n2023-10-18 + 08:18:35 +0000 32716 promptflow-runtime INFO Validating ''AzureML Data + Scientist'' user authentication...\n2023-10-18 08:18:35 +0000 32716 promptflow-runtime + INFO Running , + 5 more tries to go.\n2023-10-18 08:18:36 +0000 32716 promptflow-runtime + INFO Successfully validated ''AzureML Data Scientist'' user authentication.\n2023-10-18 + 08:18:36 +0000 32716 promptflow-runtime INFO Using AzureMLRunStorageV2\n2023-10-18 + 08:18:36 +0000 32716 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 08:18:36 +0000 32716 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 08:18:36 +0000 32716 + promptflow-runtime INFO Initialized blob service client for AzureMLRunTracker.\n2023-10-18 + 08:18:36 +0000 32716 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 08:18:36 +0000 32716 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 08:18:36 +0000 32716 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:36 +0000 32716 + promptflow-runtime INFO Get snapshot sas url for 0339df11-0eba-49c2-8aef-12b177d75c22...\n2023-10-18 + 08:18:43 +0000 32716 promptflow-runtime INFO Downloading snapshot 0339df11-0eba-49c2-8aef-12b177d75c22 + from uri {customer_content}...\n2023-10-18 08:18:43 +0000 32716 promptflow-runtime + INFO Downloaded file /service/app/40599/requests/eval_run_name/0339df11-0eba-49c2-8aef-12b177d75c22.zip + with size 1754 for snapshot 0339df11-0eba-49c2-8aef-12b177d75c22.\n2023-10-18 + 08:18:43 +0000 32716 promptflow-runtime INFO Download snapshot 0339df11-0eba-49c2-8aef-12b177d75c22 + completed.\n2023-10-18 08:18:43 +0000 32716 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 08:18:43 +0000 32716 promptflow-runtime INFO Resolve data from url finished + in 0.5705237499205396 seconds\n2023-10-18 08:18:43 +0000 32716 promptflow-runtime + INFO Running , 3 more tries to + go.\n2023-10-18 08:18:44 +0000 32716 promptflow-runtime INFO Resolve + data from url finished in 0.6895102829439566 seconds\n2023-10-18 08:18:44 + +0000 32716 promptflow-runtime INFO Flow run is not terminated, skip + persisting flow run record.\n2023-10-18 08:18:44 +0000 32716 promptflow-runtime + INFO Starting the aml run ''eval_run_name''...\n2023-10-18 08:18:45 +0000 32716 + execution.bulk INFO Using fork, process count: 3\n2023-10-18 08:18:45 + +0000 32885 execution INFO Process 32885 started.\n2023-10-18 + 08:18:45 +0000 32890 execution INFO Process 32890 started.\n2023-10-18 + 08:18:45 
+0000 32896 execution INFO Process 32896 started.\n2023-10-18 + 08:18:45 +0000 32716 execution INFO Process name: Process-78:2, + Process id: 32885, Line number: 0 start execution.\n2023-10-18 08:18:45 +0000 32885 + execution INFO Start to run 1 nodes with concurrency level 2.\n2023-10-18 + 08:18:45 +0000 32716 execution INFO Process name: Process-78:3, + Process id: 32890, Line number: 1 start execution.\n2023-10-18 08:18:45 +0000 32716 + promptflow-runtime INFO Flow run is not terminated, skip persisting flow + run record.\n2023-10-18 08:18:45 +0000 32716 execution INFO Process + name: Process-78:4, Process id: 32896, Line number: 2 start execution.\n2023-10-18 + 08:18:45 +0000 32716 promptflow-runtime INFO Flow run is not terminated, + skip persisting flow run record.\n2023-10-18 08:18:45 +0000 32896 execution INFO Start + to run 1 nodes with concurrency level 2.\n2023-10-18 08:18:45 +0000 32890 + execution INFO Start to run 1 nodes with concurrency level 2.\n2023-10-18 + 08:18:45 +0000 32716 promptflow-runtime INFO Flow run is not terminated, + skip persisting flow run record.\n2023-10-18 08:18:45 +0000 32716 execution INFO Process + name: Process-78:4, Process id: 32896, Line number: 2 completed.\n2023-10-18 + 08:18:45 +0000 32716 execution INFO Process name: Process-78:2, + Process id: 32885, Line number: 0 completed.\n2023-10-18 08:18:45 +0000 32716 + execution.bulk INFO Finished 2 / 3 lines.\n2023-10-18 08:18:45 +0000 32716 + execution.bulk INFO Average execution time for completed lines: 0.16 + seconds. Estimated time for incomplete lines: 0.16 seconds.\n2023-10-18 08:18:45 + +0000 32716 execution INFO Process name: Process-78:3, Process + id: 32890, Line number: 1 completed.\n2023-10-18 08:18:45 +0000 32716 execution.bulk INFO Finished + 2 / 3 lines.\n2023-10-18 08:18:45 +0000 32716 execution.bulk INFO Finished + 3 / 3 lines.\n2023-10-18 08:18:45 +0000 32716 execution.bulk INFO Average + execution time for completed lines: 0.17 seconds. Estimated time for incomplete + lines: 0.17 seconds.\n2023-10-18 08:18:45 +0000 32716 execution.bulk INFO Average + execution time for completed lines: 0.12 seconds. 
Estimated time for incomplete + lines: 0.0 seconds.\n2023-10-18 08:18:46 +0000 32716 execution INFO Executing + aggregation nodes...\n2023-10-18 08:18:46 +0000 32716 execution INFO Start + to run 1 nodes with concurrency level 2.\n2023-10-18 08:18:46 +0000 32716 + execution INFO Finish executing aggregation nodes.\n2023-10-18 + 08:18:47 +0000 32716 execution.bulk INFO Upload status summary metrics + for run eval_run_name finished in 1.2674665700178593 seconds\n2023-10-18 08:18:48 + +0000 32716 execution.bulk INFO Upload metrics for run eval_run_name + finished in 0.28492322098463774 seconds\n2023-10-18 08:18:48 +0000 32716 + promptflow-runtime INFO Successfully write run properties {\"azureml.promptflow.total_tokens\": + 0} with run id ''eval_run_name''\n2023-10-18 08:18:48 +0000 32716 execution.bulk INFO Upload + RH properties for run eval_run_name finished in 0.12331045896280557 seconds\n2023-10-18 + 08:18:48 +0000 32716 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:48 +0000 32716 + promptflow-runtime INFO Creating unregistered output Asset for Run eval_run_name...\n2023-10-18 + 08:18:48 +0000 32716 promptflow-runtime INFO Created debug_info Asset: + azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_debug_info/versions/1\n2023-10-18 + 08:18:48 +0000 32716 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:48 +0000 32716 + promptflow-runtime INFO Creating unregistered output Asset for Run eval_run_name...\n2023-10-18 + 08:18:49 +0000 32716 promptflow-runtime INFO Created flow_outputs output + Asset: azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_output_data_flow_outputs/versions/1\n2023-10-18 + 08:18:49 +0000 32716 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:49 +0000 32716 + promptflow-runtime INFO Creating Artifact for Run eval_run_name...\n2023-10-18 + 08:18:49 +0000 32716 promptflow-runtime INFO Created instance_results.jsonl + Artifact.\n2023-10-18 08:18:49 +0000 32716 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 08:18:49 +0000 32716 promptflow-runtime INFO Patching eval_run_name...\n2023-10-18 + 08:18:49 +0000 32716 promptflow-runtime INFO Ending the aml run ''eval_run_name'' + with status ''Completed''...\n2023-10-18 08:18:50 +0000 1095 promptflow-runtime + INFO Process 32716 finished\n2023-10-18 08:18:50 +0000 1095 promptflow-runtime + INFO [1095] Child process finished!\n2023-10-18 08:18:50 +0000 1095 + promptflow-runtime INFO [eval_run_name] End processing bulk run\n2023-10-18 + 08:18:55 +0000 1095 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:55 +0000 1095 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:18:55 +0000 1095 promptflow-runtime + INFO Run eval_run_name is in progress, Execution status: Completed\n"' + headers: + connection: + - keep-alive + content-length: + - '10790' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.604' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 
(Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/eval_run_name_1/logContent + response: + body: + string: '"2023-10-18 08:19:15 +0000 12335 promptflow-runtime INFO [eval_run_name_1] + Receiving v2 bulk run request dc1c4c54-7977-42ec-a970-f058ade9b62e: {customer_content}\n2023-10-18 + 08:19:15 +0000 12335 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:19:15 +0000 12335 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:19:15 +0000 12335 promptflow-runtime + INFO Running , + 3 more tries to go.\n2023-10-18 08:19:15 +0000 12335 promptflow-runtime + INFO Updating eval_run_name_1 to Status.Preparing...\n2023-10-18 08:19:16 + +0000 12335 promptflow-runtime INFO Starting to check process 33345 + status for run eval_run_name_1\n2023-10-18 08:19:16 +0000 12335 promptflow-runtime + INFO Start checking run status for bulk run eval_run_name_1\n2023-10-18 + 08:19:16 +0000 12335 promptflow-runtime INFO Start checking run status + for run eval_run_name_1\n2023-10-18 08:19:16 +0000 33345 promptflow-runtime + INFO [12335--33345] Start processing flowV2......\n2023-10-18 08:19:16 + +0000 33345 promptflow-runtime INFO Setting mlflow tracking uri...\n2023-10-18 + 08:19:16 +0000 33345 promptflow-runtime INFO Validating ''AzureML Data + Scientist'' user authentication...\n2023-10-18 08:19:16 +0000 33345 promptflow-runtime + INFO Running , + 5 more tries to go.\n2023-10-18 08:19:16 +0000 33345 promptflow-runtime + INFO Successfully validated ''AzureML Data Scientist'' user authentication.\n2023-10-18 + 08:19:16 +0000 33345 promptflow-runtime INFO Using AzureMLRunStorageV2\n2023-10-18 + 08:19:17 +0000 33345 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 08:19:17 +0000 33345 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 08:19:17 +0000 33345 + promptflow-runtime INFO Initialized blob service client for AzureMLRunTracker.\n2023-10-18 + 08:19:17 +0000 33345 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 08:19:17 +0000 33345 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 08:19:17 +0000 33345 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:19:17 +0000 33345 + promptflow-runtime INFO Get snapshot sas url for 649ff0ec-49e7-41a6-83db-d66bd01b128b...\n2023-10-18 + 08:19:23 +0000 33345 promptflow-runtime INFO Downloading snapshot 649ff0ec-49e7-41a6-83db-d66bd01b128b + from uri {customer_content}...\n2023-10-18 08:19:23 +0000 33345 promptflow-runtime + INFO Downloaded file /service/app/42153/requests/eval_run_name_1/649ff0ec-49e7-41a6-83db-d66bd01b128b.zip + with size 1754 for snapshot 649ff0ec-49e7-41a6-83db-d66bd01b128b.\n2023-10-18 + 08:19:23 +0000 33345 promptflow-runtime INFO Download snapshot 649ff0ec-49e7-41a6-83db-d66bd01b128b + completed.\n2023-10-18 08:19:23 +0000 33345 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 08:19:24 +0000 33345 promptflow-runtime INFO Resolve data from url 
finished
+ in 0.79884185094852 seconds\n2023-10-18 08:19:24 +0000 33345 promptflow-runtime
+ INFO Running , 3 more tries to
+ go.\n2023-10-18 08:19:25 +0000 33345 promptflow-runtime INFO Resolve
+ data from url finished in 0.4549177068984136 seconds\n2023-10-18 08:19:25
+ +0000 33345 promptflow-runtime INFO Flow run is not terminated, skip
+ persisting flow run record.\n2023-10-18 08:19:25 +0000 33345 promptflow-runtime
+ INFO Starting the aml run ''eval_run_name_1''...\n2023-10-18 08:19:25
+ +0000 33345 promptflow-runtime ERROR Run eval_run_name_1 failed. Exception:
+ {customer_content}\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/runtime/runtime.py\",
+ line 501, in execute_bulk_run_request\n resolved_inputs = flow_executor.validate_and_apply_inputs_mapping(input_dicts,
+ request.inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 824, in validate_and_apply_inputs_mapping\n resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs,
+ inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 1195, in _apply_inputs_mapping_for_all_lines\n result = [FlowExecutor.apply_inputs_mapping(item,
+ inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 1195, in <listcomp>\n result = [FlowExecutor.apply_inputs_mapping(item,
+ inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 1065, in apply_inputs_mapping\n raise InputMappingError(\npromptflow.executor._errors.InputMappingError:
+ The input for batch run is incorrect. Couldn''t find these mapping relations:
+ ${data.variant_id}. Please make sure your input mapping keys and values match
+ your YAML input section and input data. For more information, refer to the
+ following documentation: https://microsoft.github.io/promptflow/how-to-guides/column-mapping.html.\n2023-10-18
+ 08:19:26 +0000 33345 execution.bulk INFO Upload status summary metrics
+ for run eval_run_name_1 finished in 0.7952216330450028 seconds\n2023-10-18
+ 08:19:26 +0000 33345 promptflow-runtime INFO Successfully write run
+ properties {\"azureml.promptflow.total_tokens\": 0} with run id ''eval_run_name_1''\n2023-10-18
+ 08:19:26 +0000 33345 execution.bulk INFO Upload RH properties for
+ run eval_run_name_1 finished in 0.0800274059874937 seconds\n2023-10-18 08:19:26
+ +0000 33345 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:19:26 +0000 33345
+ promptflow-runtime INFO Creating unregistered output Asset for Run eval_run_name_1...\n2023-10-18
+ 08:19:26 +0000 33345 promptflow-runtime INFO Created debug_info Asset:
+ azureml://locations/eastus/workspaces/00000/data/azureml_eval_run_name_1_output_data_debug_info/versions/1\n2023-10-18
+ 08:19:26 +0000 33345 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:19:26 +0000 33345
+ promptflow-runtime INFO Patching eval_run_name_1...\n2023-10-18 08:19:26
+ +0000 33345 promptflow-runtime WARNING [eval_run_name_1] Run failed.
Execution
+ stackTrace: Traceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/runtime/runtime.py\",
+ line 501, in execute_bulk_run_request\n resolved_inputs = flow_executor.validate_and_apply_inputs_mapping(input_dicts,
+ request.inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 824, in validate_and_apply_inputs_mapping\n resolved_inputs = self._apply_inputs_mapping_for_all_lines(inputs,
+ inputs_mapping)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 1195, in _apply_inputs_mapping_for_all_lines\n result = [FlowExecutor.apply_inputs_mapping(item,
+ inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 1195, in <listcomp>\n result = [FlowExecutor.apply_inputs_mapping(item,
+ inputs_mapping) for item in merged_list]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\",
+ line 1065, in apply_inputs_mapping\n raise InputMappingError(\n\n2023-10-18
+ 08:19:27 +0000 33345 promptflow-runtime INFO Ending the aml run ''eval_run_name_1''
+ with status ''Failed''...\n2023-10-18 08:19:29 +0000 12335 promptflow-runtime
+ INFO Process 33345 finished\n2023-10-18 08:19:29 +0000 12335 promptflow-runtime
+ INFO [12335] Child process finished!\n2023-10-18 08:19:29 +0000 12335
+ promptflow-runtime INFO [eval_run_name_1] End processing bulk run\n2023-10-18
+ 08:19:29 +0000 12335 promptflow-runtime ERROR Submit flow request failed
+ Code: 400 InnerException type: InputMappingError Exception type hierarchy:
+ UserError/ValidationError/InputMappingError\n2023-10-18 08:19:36 +0000 12335
+ promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 08:19:36 +0000 12335 promptflow-runtime
+ INFO Running , 3
+ more tries to go.\n2023-10-18 08:19:36 +0000 12335 promptflow-runtime INFO Run
+ eval_run_name_1 is in progress, Execution status: Failed\n"'
+ headers:
+ connection:
+ - keep-alive
+ content-length:
+ - '9978'
+ content-type:
+ - application/json; charset=utf-8
+ strict-transport-security:
+ - max-age=15724800; includeSubDomains; preload
+ transfer-encoding:
+ - chunked
+ vary:
+ - Accept-Encoding
+ x-content-type-options:
+ - nosniff
+ x-request-time:
+ - '0.522'
+ status:
+ code: 200
+ message: OK
+version: 1
diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_flow_id_in_submission.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_flow_id_in_submission.yaml
new file mode 100644
index 00000000000..977e4a7d1c2
--- /dev/null
+++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_flow_id_in_submission.yaml
@@ -0,0 +1,806 @@
+interactions:
+- request:
+ body: null
+ headers:
+ Accept:
+ - application/json
+ Accept-Encoding:
+ - gzip, deflate
+ Connection:
+ - keep-alive
+ User-Agent:
+ - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0
+ Python/3.10.13 (Windows-10-10.0.22621-SP0)
+ method: GET
+ uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000
+ response:
+ body:
+ string: '{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.026' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.097' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": 
"workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.112' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.085' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:38 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - 
azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:39 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.081' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + 
x-request-time: + - '0.134' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:41 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '245' + content-md5: + - F+JA0a3CxcLYZ0ANRdlZbA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:10 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:09 GMT + x-ms-meta-name: + - 7eb4fee6-5edc-4ab3-905c-0a3a3c41d3a3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:43 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.102' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.104' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:46 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: 
+ - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:47 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.079' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 
azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.093' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:50 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '245' + content-md5: + - F+JA0a3CxcLYZ0ANRdlZbA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:10 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:09 GMT + x-ms-meta-name: + - 7eb4fee6-5edc-4ab3-905c-0a3a3c41d3a3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:09:51 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_detail_against_partial_fail_run.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_detail_against_partial_fail_run.yaml new file mode 100644 index 00000000000..7bf47f4528a --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_detail_against_partial_fail_run.yaml @@ -0,0 +1,1134 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.026' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; 
charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.048' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.113' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.186' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 
10:24:06 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/data.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '52' + content-md5: + - kHimciLnA7d3/I2LBUeLNA== + content-type: + - application/octet-stream + last-modified: + - Fri, 22 Sep 2023 09:37:22 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 22 Sep 2023 09:37:22 GMT + x-ms-meta-name: + - db87715d-65de-40cc-a281-09c0115699f3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 6666ca9c-d7c3-4d85-b18c-12643adb9046 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:24:07 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/data.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.137' + 
status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.103' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:24:10 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/partial_fail/data.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '52' + content-md5: + - kHimciLnA7d3/I2LBUeLNA== + content-type: + - application/octet-stream + last-modified: + - Fri, 22 Sep 2023 09:37:31 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 22 Sep 2023 09:37:30 GMT + x-ms-meta-name: + - aa1844d8-4898-4daa-8100-6140558fc7c9 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:24:11 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/partial_fail/data.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/partial_fail/flow.dag.yaml", + "runId": "name", "runDisplayName": "partial_fail", "runExperimentName": "partial_fail", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/data.jsonl"}, + "inputsMapping": {}, "connections": {}, "environmentVariables": {}, "runtimeName": + "demo-mir", "sessionId": "31858a8dfc61a642bb0ab6df4fc3ac7b3807de4ffead00d1", + "flowLineageId": "de293df4f50622090c0225852d59cd663b6b629e38728f7444fa0f12255a0647", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '708' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '11.843' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/childRuns?endIndex=24&startIndex=0 + response: + body: + string: '[{"run_id": "name_0", "status": "Completed", "error": null, "inputs": + {"key": "no", "line_number": 0}, "output": {"output": null}, "metrics": null, + "request": null, "parent_run_id": "name", "root_run_id": "name", "source_run_id": + null, "flow_id": "default_flow_id", "start_time": "2023-10-18T10:24:37.411729Z", + "end_time": "2023-10-18T10:24:37.476123Z", "index": 0, "api_calls": [{"name": + "get_env_var", "type": "Tool", "inputs": {"key": "no"}, "output": {"value": + null}, "start_time": 1697624677.472402, "end_time": 1697624677.473875, "error": + null, "children": null, "node_name": "print_env"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"duration": 0.064394, + "total_tokens": 0}, "result": {"output": null}, "upload_metrics": false}, + {"run_id": "name_2", "status": "Completed", "error": null, "inputs": {"key": + "matter", "line_number": 2}, "output": {"output": null}, "metrics": null, + "request": null, "parent_run_id": "name", "root_run_id": "name", "source_run_id": + null, "flow_id": "default_flow_id", "start_time": "2023-10-18T10:24:37.603757Z", + "end_time": "2023-10-18T10:24:37.623383Z", "index": 2, "api_calls": [{"name": + "get_env_var", "type": "Tool", "inputs": {"key": "matter"}, "output": {"value": + null}, "start_time": 1697624677.621441, "end_time": 1697624677.622396, "error": + null, "children": null, "node_name": 
"print_env"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"duration": 0.019626, + "total_tokens": 0}, "result": {"output": null}, "upload_metrics": false}, + {"run_id": "name_1", "status": "Failed", "error": {"message": "Execution failure + in ''print_env'': (Exception) expected raise!", "messageFormat": "Execution + failure in ''{node_name}'': {error_type_and_message}", "messageParameters": + {"node_name": "print_env", "error_type_and_message": "(Exception) expected + raise!"}, "referenceCode": "Tool/__pf_main__", "code": "UserError", "innerError": + {"code": "ToolExecutionError", "innerError": null}, "additionalInfo": [{"type": + "ToolExecutionErrorDetails", "info": {"type": "Exception", "message": "expected + raise!", "traceback": "Traceback (most recent call last):\n File \"/service/app/43647/requests/name/print_env.py\", + line 9, in get_env_var\n raise Exception(\"expected raise!\")\nException: + expected raise!\n", "filename": "/service/app/43647/requests/name/print_env.py", + "lineno": 9, "name": "get_env_var"}}], "debugInfo": {"type": "ToolExecutionError", + "message": "Execution failure in ''print_env'': (Exception) expected raise!", + "stackTrace": "\nThe above exception was the direct cause of the following + exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 896, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs, + context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 973, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context, + inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 993, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager).execute(context, + inputs, nodes, self._node_concurrency)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 59, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 48, in execute\n dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 77, in _collect_outputs\n each_node_result = each_future.result()\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/concurrent/futures/_base.py\", + line 439, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/concurrent/futures/_base.py\", + line 391, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/concurrent/futures/thread.py\", + line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 106, in _exec_single_node_in_thread\n result = f(**kwargs)\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/tool.py\", + line 57, in new_f\n return tool_invoker.invoke_tool(f, *args, **kwargs)\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_tool_invoker.py\", + line 19, in invoke_tool\n return cur_flow.invoke_tool_with_cache(f, 
argnames, + args, kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 126, in invoke_tool_with_cache\n result = self.invoke_tool(f, args, + kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 175, in invoke_tool\n raise ToolExecutionError(node_name=node_name, + module=f.__module__) from e\n", "innerException": {"type": "Exception", "message": + "expected raise!", "stackTrace": "Traceback (most recent call last):\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 164, in invoke_tool\n return f(*args, **kwargs)\n File \"/service/app/43647/requests/name/print_env.py\", + line 9, in get_env_var\n raise Exception(\"expected raise!\")\n", "innerException": + null}}}, "inputs": {"key": "raise", "line_number": 1}, "output": null, "metrics": + null, "request": null, "parent_run_id": "name", "root_run_id": "name", "source_run_id": + null, "flow_id": "default_flow_id", "start_time": "2023-10-18T10:24:37.599762Z", + "end_time": "2023-10-18T10:24:37.856148Z", "index": 1, "api_calls": [{"name": + "get_env_var", "type": "Tool", "inputs": {"key": "raise"}, "output": null, + "start_time": 1697624677.815764, "end_time": 1697624677.836626, "error": {"message": + "Execution failure in ''print_env'': (Exception) expected raise!", "type": + "ToolExecutionError"}, "children": null, "node_name": "print_env"}], "variant_id": + "", "name": "", "description": "", "tags": null, "system_metrics": {"duration": + 0.256386, "total_tokens": 0}, "result": null, "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '6702' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '1.113' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/childRuns?endIndex=49&startIndex=25 + response: + body: + string: '[]' + headers: + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '1.031' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697624664, 
"rootRunId": "name", "createdUtc": + "2023-10-18T10:24:24.0915333+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": {"error": {"code": "UserError", "severity": + null, "message": "Execution failure in ''print_env'': (Exception) expected + raise!", "messageFormat": "{\"totalChildRuns\": 3, \"userErrorChildRuns\": + 1, \"systemErrorChildRuns\": 0, \"errorDetails\": [{\"code\": \"UserError/ToolExecutionError\", + \"messageFormat\": \"Execution failure in ''{node_name}'': {error_type_and_message}\", + \"count\": 1}]}", "messageParameters": {"node_name": "print_env", "error_type_and_message": + "(Exception) expected raise!"}, "referenceCode": "Tool/__pf_main__", "detailsUri": + null, "target": null, "details": [], "innerError": {"code": "ToolExecutionError", + "innerError": null}, "debugInfo": {"type": "ToolExecutionError", "message": + "Execution failure in ''print_env'': (Exception) expected raise!", "stackTrace": + "\nThe above exception was the direct cause of the following exception:\n\nTraceback + (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 896, in _exec\n output, nodes_outputs = self._traverse_nodes(inputs, + context)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 973, in _traverse_nodes\n nodes_outputs, bypassed_nodes = self._submit_to_scheduler(context, + inputs, batch_nodes)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_executor.py\", + line 993, in _submit_to_scheduler\n return FlowNodesScheduler(self._tools_manager).execute(context, + inputs, nodes, self._node_concurrency)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 59, in execute\n raise e\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 48, in execute\n dag_manager.complete_nodes(self._collect_outputs(completed_futures))\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 77, in _collect_outputs\n each_node_result = each_future.result()\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/concurrent/futures/_base.py\", + line 439, in result\n return self.__get_result()\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/concurrent/futures/_base.py\", + line 391, in __get_result\n raise self._exception\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/concurrent/futures/thread.py\", + line 58, in run\n result = self.fn(*self.args, **self.kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_flow_nodes_scheduler.py\", + line 106, in _exec_single_node_in_thread\n result = f(**kwargs)\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/tool.py\", + line 57, in new_f\n return tool_invoker.invoke_tool(f, *args, **kwargs)\n File + 
\"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/_tool_invoker.py\", + line 19, in invoke_tool\n return cur_flow.invoke_tool_with_cache(f, argnames, + args, kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 126, in invoke_tool_with_cache\n result = self.invoke_tool(f, args, + kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 175, in invoke_tool\n raise ToolExecutionError(node_name=node_name, + module=f.__module__) from e\n", "innerException": {"type": "Exception", "message": + "expected raise!", "stackTrace": "Traceback (most recent call last):\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 164, in invoke_tool\n return f(*args, **kwargs)\n File \"/service/app/43647/requests/name/print_env.py\", + line 9, in get_env_var\n raise Exception(\"expected raise!\")\n", "innerException": + null, "data": null, "errorResponse": null}, "data": null, "errorResponse": + null}, "additionalInfo": [{"type": "ToolExecutionErrorDetails", "info": {"type": + "Exception", "message": "expected raise!", "traceback": "Traceback (most recent + call last):\n File \"/service/app/43647/requests/name/print_env.py\", line + 9, in get_env_var\n raise Exception(\"expected raise!\")\nException: expected + raise!\n", "filename": "/service/app/43647/requests/name/print_env.py", "lineno": + 9, "name": "get_env_var"}}]}, "correlation": null, "environment": null, "location": + null, "time": "2023-10-18T10:25:00.188757+00:00", "componentName": "promptflow-runtime/20231011.v2 + Designer/1.0 promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) promptflow/0.1.0b8.dev2"}, "warnings": + null, "revision": 6, "statusRevision": 3, "runUuid": "afc407e1-5fd0-46b8-ac6b-69c5842a56ed", + "parentRunUuid": null, "rootRunUuid": "afc407e1-5fd0-46b8-ac6b-69c5842a56ed", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:23.6308429", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "74013e41-d17e-462a-8db6-5c0e26c0368c", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "ec6824af-81f6-47fa-a07e-5a04ff0b94e7", + "upn": null}, "lastModifiedUtc": "2023-10-18T10:24:59.1521166+00:00", "duration": + "00:00:23.6308429", "cancelationReason": null, "currentAttemptId": 1, "runId": + "name", "parentRunId": null, "experimentId": "1848033e-509f-4c52-92ee-f0a0121fe99e", + "status": "Completed", "startTimeUtc": "2023-10-18T10:24:36.7853454+00:00", + "endTimeUtc": "2023-10-18T10:25:00.4161883+00:00", "scheduleId": null, "displayName": + "partial_fail", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "31858a8dfc61a642bb0ab6df4fc3ac7b3807de4ffead00d1", + 
"azureml.promptflow.flow_lineage_id": "de293df4f50622090c0225852d59cd663b6b629e38728f7444fa0f12255a0647", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/bc20fa079592a8072922533f187e3184/partial_fail/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/bbbb2b4cfb3d236b4f9b6110fd82264c/data.jsonl", + "azureml.promptflow.snapshot_id": "eb245d41-8de6-46c3-9489-0fb86e9e5071", + "azureml.promptflow.total_tokens": "0"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId": + "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1", + "type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1", + "type": "UriFolder"}}}, "runDefinition": {"Nodes": [{"Name": "print_env", + "Type": "python", "Source": {"Type": "code", "Tool": null, "Path": "print_env.py"}, + "Inputs": {"key": "${inputs.key}"}, "Tool": "print_env.py", "Reduce": false, + "Comment": null, "Activate": null, "Api": null, "Provider": null, "Connection": + null, "Module": null}], "Tools": [{"Name": "Content Safety (Text Analyze)", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureContentSafetyConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": 
false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5"}, {"Name": "Embedding", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", + "OpenAIConnection"], "Default": null, "Description": null, "Enum": null, "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "deployment_name": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["AzureOpenAIConnection"], "enabled_by_value": null, "model_list": ["text-embedding-ada-002", + "text-search-ada-doc-001", "text-search-ada-query-001"], "Capabilities": {"completion": + false, "chat_completion": false, "embeddings": true}, "dynamic_list": null, + "allow_manual_entry": false, "is_multi_select": false}, "input": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "model": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": + ["OpenAIConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Open AI''s embedding model to + create an embedding vector representing the input text.", "connection_type": + null, "Module": "promptflow.tools.embedding", "class_name": null, "Source": + null, "LkgCode": null, "Code": null, "Function": "embedding", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", "package_version": + "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", "Inputs": {"api": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + ["chat", "completion"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CustomConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + 
"dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": 
"0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + 
false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "print_env.py", + "Type": "python", "Inputs": {"key": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, 
"is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "print_env.py", "LkgCode": null, "Code": null, + "Function": "get_env_var", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}], "Codes": null, "Inputs": {"key": {"Name": + null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false, "is_chat_history": null}}, "Outputs": {"output": {"Name": null, "Type": + "string", "Description": null, "Reference": "${print_env.output.value}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '36914' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.047' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/logContent + response: + body: + string: '"2023-10-18 10:24:27 +0000 68458 promptflow-runtime INFO [name] + Receiving v2 bulk run request 8a437d2b-6baa-416d-bd5b-70d5f0a9b7ef: {customer_content}\n2023-10-18 + 10:24:27 +0000 68458 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 10:24:27 +0000 68458 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 10:24:27 +0000 68458 promptflow-runtime + INFO Running , + 3 more tries to go.\n2023-10-18 10:24:27 +0000 68458 promptflow-runtime + INFO Updating name to Status.Preparing...\n2023-10-18 10:24:27 +0000 68458 + promptflow-runtime INFO Starting to check process 104538 status for run + name\n2023-10-18 10:24:27 +0000 68458 promptflow-runtime INFO Start + checking run status for bulk run name\n2023-10-18 10:24:27 +0000 68458 promptflow-runtime + INFO Start checking run status for run name\n2023-10-18 10:24:27 +0000 104538 + promptflow-runtime INFO [68458--104538] Start processing flowV2......\n2023-10-18 + 10:24:28 +0000 104538 promptflow-runtime INFO Setting mlflow tracking + uri...\n2023-10-18 10:24:28 +0000 104538 promptflow-runtime INFO Validating + ''AzureML Data Scientist'' user authentication...\n2023-10-18 10:24:28 +0000 104538 + promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 10:24:28 +0000 104538 + promptflow-runtime INFO Successfully validated ''AzureML Data Scientist'' + user authentication.\n2023-10-18 10:24:28 +0000 104538 promptflow-runtime + INFO Using AzureMLRunStorageV2\n2023-10-18 10:24:28 +0000 104538 promptflow-runtime + INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 10:24:29 +0000 104538 promptflow-runtime INFO Running , 5 more tries to 
go.\n2023-10-18 10:24:29 +0000 104538 + promptflow-runtime INFO Initialized blob service client for AzureMLRunTracker.\n2023-10-18 + 10:24:29 +0000 104538 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 10:24:29 +0000 104538 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 10:24:29 +0000 104538 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 10:24:29 +0000 104538 + promptflow-runtime INFO Get snapshot sas url for eb245d41-8de6-46c3-9489-0fb86e9e5071...\n2023-10-18 + 10:24:35 +0000 104538 promptflow-runtime INFO Downloading snapshot eb245d41-8de6-46c3-9489-0fb86e9e5071 + from uri {customer_content}...\n2023-10-18 10:24:35 +0000 104538 promptflow-runtime + INFO Downloaded file /service/app/43647/requests/name/eb245d41-8de6-46c3-9489-0fb86e9e5071.zip + with size 733 for snapshot eb245d41-8de6-46c3-9489-0fb86e9e5071.\n2023-10-18 + 10:24:35 +0000 104538 promptflow-runtime INFO Download snapshot eb245d41-8de6-46c3-9489-0fb86e9e5071 + completed.\n2023-10-18 10:24:35 +0000 104538 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 10:24:36 +0000 104538 promptflow-runtime INFO Resolve data from url finished + in 0.7213978259824216 seconds\n2023-10-18 10:24:36 +0000 104538 promptflow-runtime + INFO Flow run is not terminated, skip persisting flow run record.\n2023-10-18 + 10:24:36 +0000 104538 promptflow-runtime INFO Starting the aml run ''name''...\n2023-10-18 + 10:24:36 +0000 104538 execution WARNING Starting run without column + mapping may lead to unexpected results. Please consult the following documentation + for more information: https://microsoft.github.io/promptflow/how-to-guides/column-mapping.html.\n2023-10-18 + 10:24:37 +0000 104538 execution.bulk INFO Using fork, process count: + 3\n2023-10-18 10:24:37 +0000 104572 execution INFO Process 104572 + started.\n2023-10-18 10:24:37 +0000 104576 execution INFO Start + to run 1 nodes with concurrency level 2.\n2023-10-18 10:24:37 +0000 104538 + execution INFO Process name: Process-90:3, Process id: 104576, + Line number: 0 start execution.\n2023-10-18 10:24:37 +0000 104538 promptflow-runtime + INFO Flow run is not terminated, skip persisting flow run record.\n2023-10-18 + 10:24:37 +0000 104538 execution INFO Process name: Process-90:3, + Process id: 104576, Line number: 0 completed.\n2023-10-18 10:24:37 +0000 104538 + execution.bulk INFO Finished 1 / 3 lines.\n2023-10-18 10:24:37 +0000 104580 + execution INFO Process 104580 started.\n2023-10-18 10:24:37 +0000 104538 + execution.bulk INFO Average execution time for completed lines: 0.23 + seconds. 
Estimated time for incomplete lines: 0.46 seconds.\n2023-10-18 10:24:37 + +0000 104538 execution INFO Process name: Process-90:4, Process + id: 104580, Line number: 1 start execution.\n2023-10-18 10:24:37 +0000 104576 + execution INFO Start to run 1 nodes with concurrency level 2.\n2023-10-18 + 10:24:37 +0000 104538 execution INFO Process name: Process-90:3, + Process id: 104576, Line number: 2 start execution.\n2023-10-18 10:24:37 +0000 104538 + promptflow-runtime INFO Flow run is not terminated, skip persisting flow + run record.\n2023-10-18 10:24:37 +0000 104538 promptflow-runtime INFO Flow + run is not terminated, skip persisting flow run record.\n2023-10-18 10:24:37 + +0000 104538 execution INFO Process name: Process-90:3, Process + id: 104576, Line number: 2 completed.\n2023-10-18 10:24:37 +0000 104538 execution.bulk INFO Finished + 2 / 3 lines.\n2023-10-18 10:24:37 +0000 104538 execution.bulk INFO Average + execution time for completed lines: 0.19 seconds. Estimated time for incomplete + lines: 0.19 seconds.\n2023-10-18 10:24:37 +0000 104580 execution INFO Start + to run 1 nodes with concurrency level 2.\n2023-10-18 10:24:37 +0000 104580 + execution ERROR Node print_env in line 1 failed. Exception: Execution + failure in ''print_env'': (Exception) expected raise!.\nTraceback (most recent + call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 164, in invoke_tool\n return f(*args, **kwargs)\n File \"/service/app/43647/requests/name/print_env.py\", + line 9, in get_env_var\n raise Exception(\"expected raise!\")\nException: + expected raise!\n\nThe above exception was the direct cause of the following + exception:\n\nTraceback (most recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 126, in invoke_tool_with_cache\n result = self.invoke_tool(f, args, + kwargs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/_core/flow_execution_context.py\", + line 175, in invoke_tool\n raise ToolExecutionError(node_name=node_name, + module=f.__module__) from e\npromptflow._core._errors.ToolExecutionError: + Execution failure in ''print_env'': (Exception) expected raise!\n2023-10-18 + 10:24:37 +0000 104580 execution ERROR One node execution failed, + cancel all running tasks. print_env.\n2023-10-18 10:24:37 +0000 104538 execution INFO Process + name: Process-90:4, Process id: 104580, Line number: 1 completed.\n2023-10-18 + 10:24:37 +0000 104538 execution.bulk INFO Finished 3 / 3 lines.\n2023-10-18 + 10:24:37 +0000 104538 execution.bulk INFO Average execution time + for completed lines: 0.19 seconds. 
Estimated time for incomplete lines: 0.0 + seconds.\n2023-10-18 10:24:47 +0000 68458 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 10:24:47 +0000 68458 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 10:24:48 +0000 68458 + promptflow-runtime INFO Run name is in progress, Execution status: Running\n2023-10-18 + 10:24:37 +0000 104572 execution INFO Process 104572 started.\n2023-10-18 + 10:24:59 +0000 104538 execution.bulk INFO Upload status summary metrics + for run name finished in 1.4370432270225137 seconds\n2023-10-18 10:24:59 +0000 104538 + promptflow-runtime INFO Successfully write run properties {\"azureml.promptflow.total_tokens\": + 0} with run id ''name''\n2023-10-18 10:24:59 +0000 104538 execution.bulk INFO Upload + RH properties for run name finished in 0.07237825903575867 seconds\n2023-10-18 + 10:24:59 +0000 104538 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 10:24:59 +0000 104538 + promptflow-runtime INFO Creating unregistered output Asset for Run name...\n2023-10-18 + 10:24:59 +0000 104538 promptflow-runtime INFO Created debug_info Asset: + azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1\n2023-10-18 + 10:24:59 +0000 104538 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 10:24:59 +0000 104538 + promptflow-runtime INFO Creating unregistered output Asset for Run name...\n2023-10-18 + 10:24:59 +0000 104538 promptflow-runtime INFO Created flow_outputs output + Asset: azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1\n2023-10-18 + 10:24:59 +0000 104538 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 10:24:59 +0000 104538 + promptflow-runtime INFO Creating Artifact for Run name...\n2023-10-18 + 10:25:00 +0000 104538 promptflow-runtime INFO Created instance_results.jsonl + Artifact.\n2023-10-18 10:25:00 +0000 104538 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 10:25:00 +0000 104538 promptflow-runtime INFO Patching name...\n2023-10-18 + 10:25:00 +0000 104538 promptflow-runtime INFO Ending the aml run ''name'' + with status ''Completed''...\n2023-10-18 10:25:00 +0000 68458 promptflow-runtime + INFO Process 104538 finished\n2023-10-18 10:25:00 +0000 68458 promptflow-runtime + INFO [68458] Child process finished!\n2023-10-18 10:25:00 +0000 68458 + promptflow-runtime INFO [name] End processing bulk run\n"' + headers: + connection: + - keep-alive + content-length: + - '11680' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.820' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_invalid_run_cases.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_invalid_run_cases.yaml new file mode 100644 index 00000000000..3454399d081 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_get_invalid_run_cases.yaml @@ -0,0 +1,146 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 
(Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.025' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.084' + status: + code: 200 + message: OK +- request: + body: '{"runId": "non_exist_run", "selectRunMetadata": true, "selectRunDefinition": + true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: 
https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"error": {"code": "UserError", "severity": null, "message": "Run runId=non_exist_run + was not found", "messageFormat": "Run {runId} was not found", "messageParameters": + {"runId": "runId=non_exist_run"}, "referenceCode": null, "detailsUri": null, + "target": null, "details": [], "innerError": {"code": "NotFoundError", "innerError": + null}, "debugInfo": null, "additionalInfo": null}, "correlation": {"operation": + "c6cd4c20f5d5db3e2e37ecbd5405efc8", "request": "c5db1da6f17d5d0e"}, "environment": + "eastus", "location": "eastus", "time": "2023-10-18T09:58:37.7134807+00:00", + "componentName": "run-history", "statusCode": 404}' + headers: + connection: + - keep-alive + content-length: + - '777' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.074' + status: + code: 404 + message: Run runId=63322010-a79d-4563-a252-ad8201f2d41d was not found +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_input_mapping_with_dict.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_input_mapping_with_dict.yaml new file mode 100644 index 00000000000..2d088fd4119 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_input_mapping_with_dict.yaml @@ -0,0 +1,929 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.029' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.077' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; 
includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.083' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.109' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:53:35 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '379' + content-md5: + - lI/pz9jzTQ7Td3RHPL7y7w== + content-type: + - application/octet-stream + last-modified: + - Tue, 25 Jul 2023 06:21:56 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Tue, 25 Jul 2023 06:21:56 GMT + x-ms-meta-name: + - e0068493-1fbe-451c-96b3-cf6b013632ad + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 1f73938f-def0-4a75-b4d0-6b07a2378e1b + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:53:36 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.108' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.079' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:53:39 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/flow_with_dict_input/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + 
content-length: + - '245' + content-md5: + - NJ+RHeG4z7emGpIQkHVUaA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:41 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:41 GMT + x-ms-meta-name: + - 45e1c95d-7502-4635-a7f0-f46ca730e5e1 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:53:40 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/flow_with_dict_input/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/flow_with_dict_input/flow.dag.yaml", + "runId": "name", "runDisplayName": "flow_with_dict_input", "runExperimentName": + "flow_with_dict_input", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl"}, + "inputsMapping": {"key": "{\"a\": 1}", "extra": "${data.url}"}, "connections": + {}, "environmentVariables": {}, "runtimeName": "demo-mir", "sessionId": "3d2f008a09980a5f5e8942bd5f4c92141c535ef210ac813d", + "flowLineageId": "3c6d1b895f186a7155e01b19db3460cb3ed8fa951d53c78bbede4ee2b2b2b4be", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '789' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '11.190' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: 
'{"runMetadata": {"runNumber": 1697622832, "rootRunId": "name", "createdUtc": + "2023-10-18T09:53:52.1119145+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 5, + "statusRevision": 3, "runUuid": "463cbf6e-25c6-4e44-854b-5662938b0985", "parentRunUuid": + null, "rootRunUuid": "463cbf6e-25c6-4e44-854b-5662938b0985", "lastStartTimeUtc": + null, "currentComputeTime": null, "computeDuration": "00:00:04.0670258", "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "74013e41-d17e-462a-8db6-5c0e26c0368c", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "ec6824af-81f6-47fa-a07e-5a04ff0b94e7", + "upn": null}, "lastModifiedUtc": "2023-10-18T09:54:07.4898441+00:00", "duration": + "00:00:04.0670258", "cancelationReason": null, "currentAttemptId": 1, "runId": + "name", "parentRunId": null, "experimentId": "e3f25497-3a2f-4b85-9007-6878e87a4f82", + "status": "Completed", "startTimeUtc": "2023-10-18T09:54:04.5081276+00:00", + "endTimeUtc": "2023-10-18T09:54:08.5751534+00:00", "scheduleId": null, "displayName": + "flow_with_dict_input", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "3d2f008a09980a5f5e8942bd5f4c92141c535ef210ac813d", + "azureml.promptflow.flow_lineage_id": "3c6d1b895f186a7155e01b19db3460cb3ed8fa951d53c78bbede4ee2b2b2b4be", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/1b5bfbe9fa8852efec4cb77ad2614122/flow_with_dict_input/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/74c11bba717480b2d6b04b8e746d09d7/webClassification3.jsonl", + "azureml.promptflow.inputs_mapping": "{\"key\":\"{\\\"a\\\": 1}\",\"extra\":\"${data.url}\"}", + "azureml.promptflow.snapshot_id": "06f24cd3-56b2-4eae-9d18-770b364e703a", + "azureml.promptflow.total_tokens": "0"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": {"debug_info": {"assetId": + 
"azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1", + "type": "UriFolder"}, "flow_outputs": {"assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1", + "type": "UriFolder"}}}, "runDefinition": {"Nodes": [{"Name": "print_val", + "Type": "python", "Source": {"Type": "code", "Tool": null, "Path": "print_val.py"}, + "Inputs": {"key": "${inputs.key}"}, "Tool": "print_val.py", "Reduce": false, + "Comment": null, "Activate": null, "Api": null, "Provider": null, "Connection": + null, "Module": null}], "Tools": [{"Name": "Content Safety (Text Analyze)", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureContentSafetyConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5"}, {"Name": "Embedding", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", + "OpenAIConnection"], "Default": null, "Description": null, "Enum": null, "enabled_by": 
+ null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "deployment_name": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["AzureOpenAIConnection"], "enabled_by_value": null, "model_list": ["text-embedding-ada-002", + "text-search-ada-doc-001", "text-search-ada-query-001"], "Capabilities": {"completion": + false, "chat_completion": false, "embeddings": true}, "dynamic_list": null, + "allow_manual_entry": false, "is_multi_select": false}, "input": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "model": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": + ["OpenAIConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Open AI''s embedding model to + create an embedding vector representing the input text.", "connection_type": + null, "Module": "promptflow.tools.embedding", "class_name": null, "Source": + null, "LkgCode": null, "Code": null, "Function": "embedding", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", "package_version": + "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", "Inputs": {"api": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + ["chat", "completion"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CustomConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, 
"Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": 
"promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, 
"enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "print_val.py", + "Type": "python", "Inputs": {"key": {"Name": null, "Type": ["object"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "print_val.py", "LkgCode": null, "Code": null, + "Function": "get_val", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}], "Codes": null, "Inputs": {"key": {"Name": + null, "Type": "object", "Default": null, "Description": null, "is_chat_input": + false, "is_chat_history": null}}, "Outputs": {"output": {"Name": null, "Type": + "string", "Description": null, "Reference": "${print_val.output.value}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '31595' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - 
nosniff + x-request-time: + - '0.053' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/name/logContent + response: + body: + string: '"2023-10-18 09:53:55 +0000 37001 promptflow-runtime INFO [name] + Receiving v2 bulk run request 75c76640-ecca-4c3a-9d17-abeb792d3425: {customer_content}\n2023-10-18 + 09:53:55 +0000 37001 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:53:55 +0000 37001 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:53:55 +0000 37001 promptflow-runtime + INFO Running , + 3 more tries to go.\n2023-10-18 09:53:55 +0000 37001 promptflow-runtime + INFO Updating name to Status.Preparing...\n2023-10-18 09:53:55 +0000 37001 + promptflow-runtime INFO Starting to check process 92513 status for run + name\n2023-10-18 09:53:55 +0000 37001 promptflow-runtime INFO Start + checking run status for bulk run name\n2023-10-18 09:53:55 +0000 37001 promptflow-runtime + INFO Start checking run status for run name\n2023-10-18 09:53:55 +0000 92513 + promptflow-runtime INFO [37001--92513] Start processing flowV2......\n2023-10-18 + 09:53:55 +0000 92513 promptflow-runtime INFO Setting mlflow tracking + uri...\n2023-10-18 09:53:55 +0000 92513 promptflow-runtime INFO Validating + ''AzureML Data Scientist'' user authentication...\n2023-10-18 09:53:55 +0000 92513 + promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 09:53:56 +0000 92513 + promptflow-runtime INFO Successfully validated ''AzureML Data Scientist'' + user authentication.\n2023-10-18 09:53:56 +0000 92513 promptflow-runtime + INFO Using AzureMLRunStorageV2\n2023-10-18 09:53:56 +0000 92513 promptflow-runtime + INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 09:53:56 +0000 92513 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 09:53:56 +0000 92513 + promptflow-runtime INFO Initialized blob service client for AzureMLRunTracker.\n2023-10-18 + 09:53:56 +0000 92513 promptflow-runtime INFO Setting mlflow tracking + uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-10-18 + 09:53:56 +0000 92513 promptflow-runtime INFO Running , 5 more tries to go.\n2023-10-18 09:53:56 +0000 92513 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:53:56 +0000 92513 + promptflow-runtime INFO Get snapshot sas url for 06f24cd3-56b2-4eae-9d18-770b364e703a...\n2023-10-18 + 09:54:03 +0000 92513 promptflow-runtime INFO Downloading snapshot 06f24cd3-56b2-4eae-9d18-770b364e703a + from uri {customer_content}...\n2023-10-18 09:54:03 +0000 92513 promptflow-runtime + INFO Downloaded file /service/app/45135/requests/name/06f24cd3-56b2-4eae-9d18-770b364e703a.zip + with size 590 for snapshot 06f24cd3-56b2-4eae-9d18-770b364e703a.\n2023-10-18 + 
09:54:03 +0000 92513 promptflow-runtime INFO Download snapshot 06f24cd3-56b2-4eae-9d18-770b364e703a + completed.\n2023-10-18 09:54:03 +0000 92513 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 09:54:04 +0000 92513 promptflow-runtime INFO Resolve data from url finished + in 0.6662551360204816 seconds\n2023-10-18 09:54:04 +0000 92513 promptflow-runtime + INFO Flow run is not terminated, skip persisting flow run record.\n2023-10-18 + 09:54:04 +0000 92513 promptflow-runtime INFO Starting the aml run ''name''...\n2023-10-18 + 09:54:04 +0000 92513 execution.bulk INFO Using fork, process count: + 3\n2023-10-18 09:54:04 +0000 92545 execution INFO Process 92545 + started.\n2023-10-18 09:54:04 +0000 92549 execution INFO Process + 92549 started.\n2023-10-18 09:54:04 +0000 92553 execution INFO Process + 92553 started.\n2023-10-18 09:54:04 +0000 92513 execution INFO Process + name: Process-110:2, Process id: 92545, Line number: 0 start execution.\n2023-10-18 + 09:54:04 +0000 92545 execution INFO Start to run 1 nodes with + concurrency level 2.\n2023-10-18 09:54:04 +0000 92549 execution INFO Start + to run 1 nodes with concurrency level 2.\n2023-10-18 09:54:04 +0000 92513 + execution INFO Process name: Process-110:3, Process id: 92549, + Line number: 1 start execution.\n2023-10-18 09:54:04 +0000 92553 execution INFO Start + to run 1 nodes with concurrency level 2.\n2023-10-18 09:54:04 +0000 92513 + promptflow-runtime INFO Flow run is not terminated, skip persisting flow + run record.\n2023-10-18 09:54:04 +0000 92513 execution INFO Process + name: Process-110:4, Process id: 92553, Line number: 2 start execution.\n2023-10-18 + 09:54:04 +0000 92513 promptflow-runtime INFO Flow run is not terminated, + skip persisting flow run record.\n2023-10-18 09:54:04 +0000 92513 promptflow-runtime + INFO Flow run is not terminated, skip persisting flow run record.\n2023-10-18 + 09:54:05 +0000 92513 execution INFO Process name: Process-110:2, + Process id: 92545, Line number: 0 completed.\n2023-10-18 09:54:05 +0000 92513 + execution.bulk INFO Finished 1 / 3 lines.\n2023-10-18 09:54:05 +0000 92513 + execution.bulk INFO Average execution time for completed lines: 0.29 + seconds. Estimated time for incomplete lines: 0.58 seconds.\n2023-10-18 09:54:05 + +0000 92513 execution INFO Process name: Process-110:3, Process + id: 92549, Line number: 1 completed.\n2023-10-18 09:54:05 +0000 92513 execution.bulk INFO Finished + 2 / 3 lines.\n2023-10-18 09:54:05 +0000 92513 execution INFO Process + name: Process-110:4, Process id: 92553, Line number: 2 completed.\n2023-10-18 + 09:54:05 +0000 92513 execution.bulk INFO Average execution time + for completed lines: 0.17 seconds. Estimated time for incomplete lines: 0.17 + seconds.\n2023-10-18 09:54:05 +0000 92513 execution.bulk INFO Finished + 3 / 3 lines.\n2023-10-18 09:54:05 +0000 92513 execution.bulk INFO Average + execution time for completed lines: 0.12 seconds. 
Estimated time for incomplete + lines: 0.0 seconds.\n2023-10-18 09:54:07 +0000 92513 execution.bulk INFO Upload + status summary metrics for run name finished in 1.2474508670857176 seconds\n2023-10-18 + 09:54:07 +0000 92513 promptflow-runtime INFO Successfully write run + properties {\"azureml.promptflow.total_tokens\": 0} with run id ''name''\n2023-10-18 + 09:54:07 +0000 92513 execution.bulk INFO Upload RH properties for + run name finished in 0.08914249006193131 seconds\n2023-10-18 09:54:07 +0000 92513 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:54:07 +0000 92513 + promptflow-runtime INFO Creating unregistered output Asset for Run name...\n2023-10-18 + 09:54:07 +0000 92513 promptflow-runtime INFO Created debug_info Asset: + azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_debug_info/versions/1\n2023-10-18 + 09:54:07 +0000 92513 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:54:07 +0000 92513 + promptflow-runtime INFO Creating unregistered output Asset for Run name...\n2023-10-18 + 09:54:08 +0000 92513 promptflow-runtime INFO Created flow_outputs output + Asset: azureml://locations/eastus/workspaces/00000/data/azureml_name_output_data_flow_outputs/versions/1\n2023-10-18 + 09:54:08 +0000 92513 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:54:08 +0000 92513 + promptflow-runtime INFO Creating Artifact for Run name...\n2023-10-18 + 09:54:08 +0000 92513 promptflow-runtime INFO Created instance_results.jsonl + Artifact.\n2023-10-18 09:54:08 +0000 92513 promptflow-runtime INFO Running + , 3 more tries to go.\n2023-10-18 + 09:54:08 +0000 92513 promptflow-runtime INFO Patching name...\n2023-10-18 + 09:54:08 +0000 92513 promptflow-runtime INFO Ending the aml run ''name'' + with status ''Completed''...\n2023-10-18 09:54:09 +0000 37001 promptflow-runtime + INFO Process 92513 finished\n2023-10-18 09:54:09 +0000 37001 promptflow-runtime + INFO [37001] Child process finished!\n2023-10-18 09:54:09 +0000 37001 + promptflow-runtime INFO [name] End processing bulk run\n2023-10-18 09:54:15 + +0000 37001 promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:54:15 +0000 37001 + promptflow-runtime INFO Running , 3 more tries to go.\n2023-10-18 09:54:15 +0000 37001 promptflow-runtime + INFO Run name is in progress, Execution status: Completed\n"' + headers: + connection: + - keep-alive + content-length: + - '10073' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.703' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_list_runs.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_list_runs.yaml new file mode 100644 index 00000000000..a5d14071e59 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_list_runs.yaml @@ -0,0 +1,589 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.035' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.067' + status: + code: 200 + message: OK +- request: + body: '{"filters": [{"field": "type", "operator": "eq", "values": ["runs"]}, {"field": + "annotations/archived", "operator": "eq", "values": ["false"]}, {"field": "properties/runType", + "operator": "contains", "values": ["azureml.promptflow.FlowRun", "azureml.promptflow.EvaluationRun", + "azureml.promptflow.PairwiseEvaluationRun"]}], "freeTextSearch": "", "order": + [{"direction": "Desc", "field": "properties/creationContext/createdTime"}], + "pageSize": 10, "skip": 0, "includeTotalResultCount": true, 
"searchBuilder": + "AppendPrefix"}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '523' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/index/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/entities + response: + body: + string: '{"totalCount": 103372, "value": [{"relevancyScore": 0.28867513, "entityResourceName": + "promptflow-eastus", "highlights": {}, "usage": {"totalCount": 0}, "schemaId": + "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/ffa14fc8-a4c6-414b-a7b2-b8a97090f171", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "my_display_name_variant_0_202310180848", "status": "Running", "primaryMetricName": + null, "estimatedCost": null, "primaryMetricSummary": null, "metrics": {}, + "parameters": {}, "settings": {}, "modifiedTime": "2023-10-18T08:48:35.1508317Z", + "retainForLifetimeOfWorkspace": false, "error": {"code": null, "errorCodeHierarchy": + null, "message": null, "time": null, "componentName": null, "severity": null, + "detailsUri": null, "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + null, "computeDurationMilliseconds": null, "effectiveStartTimeUtc": "2023-10-18T08:48:35.1012031Z", + "name": null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:22.4138648+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.print_env_var_variant_0_20231018_084806_783003", + "targetName": null, "runName": null, "experimentName": "print_env_var", "runId": + "print_env_var_variant_0_20231018_084806_783003", "parentRunId": null, "rootRunId": + "print_env_var_variant_0_20231018_084806_783003", "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": {}, "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "scriptName": null, "experimentId": "6a87c3ae-5a75-4c5d-9eb9-5203b0062282", + "runUuid": "ffa14fc8-a4c6-414b-a7b2-b8a97090f171", "parentRunUuid": null, + "runNumber": 1697618902, "startTime": "2023-10-18T08:48:35.1012031Z", "endTime": + null, "computeRequest": null, "compute": null, "userProperties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "303ae2253884bf600b3a7b4e3f7cd1034f7ae51de179f842", + "azureml.promptflow.flow_lineage_id": "f62f35c174e78bd426fba55c56d2ffb88daa2b2445c4adbe93d67262ab5b0186", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/2fabaeafe8ecfa6c1f9f6d2700566545/print_env_var/flow.dag.yaml", + "azureml.promptflow.input_data": 
"azureml://datastores/workspaceblobstore/paths/LocalUpload/c3cac0ab6642676732c296329fffd869/env_var_names.jsonl", + "azureml.promptflow.snapshot_id": "1a5be08d-aa16-4ee9-a8d0-fcb722b8f4ad"}, + "actionUris": {}, "duration": null, "durationMilliseconds": null}, "internal": + {}, "updateSequence": 3, "type": "runs", "version": null, "entityContainerId": + "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId": "ffa14fc8-a4c6-414b-a7b2-b8a97090f171", + "resourceType": "Workspace", "relationships": []}, {"relevancyScore": 0.28867513, + "entityResourceName": "promptflow-eastus", "highlights": {}, "usage": {"totalCount": + 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/236280cc-61df-44b3-ba85-7dc954c088be", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "print_env_var", "status": "Running", "primaryMetricName": null, "estimatedCost": + null, "primaryMetricSummary": null, "metrics": {}, "parameters": {}, "settings": + {}, "modifiedTime": "2023-10-18T08:48:35.5591742Z", "retainForLifetimeOfWorkspace": + false, "error": {"code": null, "errorCodeHierarchy": null, "message": null, + "time": null, "componentName": null, "severity": null, "detailsUri": null, + "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + null, "computeDurationMilliseconds": null, "effectiveStartTimeUtc": "2023-10-18T08:48:35.5270326Z", + "name": null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:22.2043503+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.print_env_var_variant_0_20231018_084811_106481", + "targetName": null, "runName": null, "experimentName": "print_env_var", "runId": + "print_env_var_variant_0_20231018_084811_106481", "parentRunId": null, "rootRunId": + "print_env_var_variant_0_20231018_084811_106481", "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": {}, "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "scriptName": null, "experimentId": "6a87c3ae-5a75-4c5d-9eb9-5203b0062282", + "runUuid": "236280cc-61df-44b3-ba85-7dc954c088be", "parentRunUuid": null, + "runNumber": 1697618902, "startTime": "2023-10-18T08:48:35.5270326Z", "endTime": + null, "computeRequest": null, "compute": null, "userProperties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "303ae2253884bf600b3a7b4e3f7cd1034f7ae51de179f842", + "azureml.promptflow.flow_lineage_id": "f62f35c174e78bd426fba55c56d2ffb88daa2b2445c4adbe93d67262ab5b0186", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/2fabaeafe8ecfa6c1f9f6d2700566545/print_env_var/flow.dag.yaml", + "azureml.promptflow.input_data": 
"azureml://datastores/workspaceblobstore/paths/LocalUpload/c3cac0ab6642676732c296329fffd869/env_var_names.jsonl", + "azureml.promptflow.snapshot_id": "be86ce36-7162-41d0-8a2f-e883171bce18"}, + "actionUris": {}, "duration": null, "durationMilliseconds": null}, "internal": + {}, "updateSequence": 3, "type": "runs", "version": null, "entityContainerId": + "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId": "236280cc-61df-44b3-ba85-7dc954c088be", + "resourceType": "Workspace", "relationships": []}, {"relevancyScore": 0.28867513, + "entityResourceName": "promptflow-eastus", "highlights": {}, "usage": {"totalCount": + 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/1b1509f7-3ead-4f0b-9290-6ede34e6b316", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "flow_with_dict_input", "status": "Completed", "primaryMetricName": null, + "estimatedCost": null, "primaryMetricSummary": null, "metrics": {}, "parameters": + {}, "settings": {}, "modifiedTime": "2023-10-18T08:48:38.293153Z", "retainForLifetimeOfWorkspace": + false, "error": {"code": null, "errorCodeHierarchy": null, "message": null, + "time": null, "componentName": null, "severity": null, "detailsUri": null, + "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + "00:00:04.1286633", "computeDurationMilliseconds": 4128.6633, "effectiveStartTimeUtc": + null, "name": null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:22.0593937+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.flow_with_dict_input_variant_0_20231018_084810_040275", + "targetName": null, "runName": null, "experimentName": "flow_with_dict_input", + "runId": "flow_with_dict_input_variant_0_20231018_084810_040275", "parentRunId": + null, "rootRunId": "flow_with_dict_input_variant_0_20231018_084810_040275", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": "e3f25497-3a2f-4b85-9007-6878e87a4f82", "runUuid": "1b1509f7-3ead-4f0b-9290-6ede34e6b316", + "parentRunUuid": null, "runNumber": 1697618902, "startTime": "2023-10-18T08:48:34.1337497Z", + "endTime": "2023-10-18T08:48:38.262413Z", "computeRequest": null, "compute": + null, "userProperties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "f25e17dfadfb728f7f55c1c689ff7bfcdc266c7cfbf77138", + "azureml.promptflow.flow_lineage_id": "59dd7ea905c541c8086ef906810da234dd72b68987cd5785f67f4eb13943b26b", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/6d71049bad682c8063c83b44ba260327/flow_with_dict_input/flow.dag.yaml", + 
"azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/710002843bc088af1e61f18537317143/webClassification3.jsonl", + "azureml.promptflow.inputs_mapping": "{\"key\":\"{\\\"a\\\": 1}\",\"extra\":\"${data.url}\"}", + "azureml.promptflow.snapshot_id": "3ae24c20-5dc7-4436-904b-19b28afce7b4", + "azureml.promptflow.total_tokens": "0"}, "actionUris": {}, "duration": "00:00:04.1286633", + "durationMilliseconds": 4128.6633}, "internal": {}, "updateSequence": 5, "type": + "runs", "version": null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "entityObjectId": "1b1509f7-3ead-4f0b-9290-6ede34e6b316", "resourceType": + "Workspace", "relationships": [{"relationType": "CreatedBy", "targetEntityId": + null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_flow_with_dict_input_variant_0_20231018_084810_040275_output_data_debug_info/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}, + {"relationType": "CreatedBy", "targetEntityId": null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_flow_with_dict_input_variant_0_20231018_084810_040275_output_data_flow_outputs/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}]}, + {"relevancyScore": 0.28867513, "entityResourceName": "promptflow-eastus", + "highlights": {}, "usage": {"totalCount": 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", + "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/94b546f2-fbaa-4278-a9e1-f293cd610cb5", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "classification_accuracy_evaluation", "status": "Failed", "primaryMetricName": + null, "estimatedCost": null, "primaryMetricSummary": null, "metrics": {}, + "parameters": {}, "settings": {}, "modifiedTime": "2023-10-18T08:48:34.7079397Z", + "retainForLifetimeOfWorkspace": false, "error": {"code": "UserError", "errorCodeHierarchy": + "UserError/ValidationError/InputMappingError", "message": "The input for batch + run is incorrect. Couldn''t find these mapping relations: ${data.variant_id}. + Please make sure your input mapping keys and values match your YAML input + section and input data. 
For more information, refer to the following documentation: + https://microsoft.github.", "time": "2023-10-18T08:48:34.286481Z", "componentName": + "promptflow-runtime/20231011.v2 Designer/1.0 promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.9.18 (Linux-6.2.0-1012-azure-x86_64-with-glibc2.35) promptflow/0.1.0b8.dev2", + "severity": null, "detailsUri": null, "referenceCode": "Executor"}, "resourceMetricSummary": + {"gpuUtilizationPercentLastHour": null, "gpuMemoryUtilizationPercentLastHour": + null, "gpuEnergyJoules": null, "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": + null, "chargedCpuMemoryMegabyteSeconds": null, "chargedGpuSeconds": null, + "chargedNodeUtilizationSeconds": null}, "computeDuration": "00:00:01.9530004", + "computeDurationMilliseconds": 1953.0004, "effectiveStartTimeUtc": null, "name": + null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:19.4878261+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.classification_accuracy_evaluation_variant_0_20231018_084805_499564", + "targetName": null, "runName": null, "experimentName": "classification_accuracy_evaluation", + "runId": "classification_accuracy_evaluation_variant_0_20231018_084805_499564", + "parentRunId": null, "rootRunId": "classification_accuracy_evaluation_variant_0_20231018_084805_499564", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": "dd66dff5-aa1a-4674-83c3-0c347f3ad863", "runUuid": "94b546f2-fbaa-4278-a9e1-f293cd610cb5", + "parentRunUuid": null, "runNumber": 1697618899, "startTime": "2023-10-18T08:48:32.7050612Z", + "endTime": "2023-10-18T08:48:34.6580616Z", "computeRequest": null, "compute": + null, "userProperties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "a21932356f0b71b9dde77d1e084a90f5b3a3b71ce94d33ea", + "azureml.promptflow.flow_lineage_id": "a390805d6770539e1c086c09225e431dc077d346beacf3edb18e22e5bf6fd57f", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/a2a981c28f0b6b938c08c441a756cd0d/classification_accuracy_evaluation/flow.dag.yaml", + "azureml.promptflow.input_run_id": "web_classification_variant_0_20231018_084639_662774", + "azureml.promptflow.inputs_mapping": "{\"groundtruth\":\"${run.inputs.url}\",\"prediction\":\"${run.outputs.category}\"}", + "azureml.promptflow.snapshot_id": "bda1aae3-e4f2-43cf-a992-20d7c00b002a", + "azureml.promptflow.total_tokens": "0"}, "actionUris": {}, "duration": "00:00:01.9530004", + "durationMilliseconds": 1953.0004}, "internal": {}, "updateSequence": 6, "type": + "runs", "version": null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "entityObjectId": "94b546f2-fbaa-4278-a9e1-f293cd610cb5", "resourceType": + "Workspace", "relationships": [{"relationType": "CreatedBy", "targetEntityId": + null, "assetId": 
"azureml://locations/eastus/workspaces/00000/data/azureml_classification_accuracy_evaluation_variant_0_20231018_084805_499564_output_data_debug_info/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}]}, + {"relevancyScore": 0.28867513, "entityResourceName": "promptflow-eastus", + "highlights": {}, "usage": {"totalCount": 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", + "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/af52ab5f-43fb-42b4-b20d-9ab2a4759ab7", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "web_classification", "status": "Running", "primaryMetricName": null, "estimatedCost": + null, "primaryMetricSummary": null, "metrics": {}, "parameters": {}, "settings": + {}, "modifiedTime": "2023-10-18T08:48:40.2296806Z", "retainForLifetimeOfWorkspace": + false, "error": {"code": null, "errorCodeHierarchy": null, "message": null, + "time": null, "componentName": null, "severity": null, "detailsUri": null, + "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + null, "computeDurationMilliseconds": null, "effectiveStartTimeUtc": "2023-10-18T08:48:31.512214Z", + "name": null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:18.7160783+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.web_classification_variant_0_20231018_084759_591479", + "targetName": null, "runName": null, "experimentName": "web_classification", + "runId": "web_classification_variant_0_20231018_084759_591479", "parentRunId": + null, "rootRunId": "web_classification_variant_0_20231018_084759_591479", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "runUuid": "af52ab5f-43fb-42b4-b20d-9ab2a4759ab7", + "parentRunUuid": null, "runNumber": 1697618898, "startTime": "2023-10-18T08:48:31.512214Z", + "endTime": null, "computeRequest": null, "compute": null, "userProperties": + {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "37910a55386b92933577982c8558805024b3e24e914d3c4c", + "azureml.promptflow.flow_lineage_id": "5743a4e13073efb15fa579cd8c5fc9b7572b8cf39c12ce19631c26d3290ccca5", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/0bdd98f38bda96f0376199542e83c4c4/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/1d56ae643dfd9934c32fbb3ecf5a07f5/webClassification1.jsonl", + "azureml.promptflow.inputs_mapping": 
"{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "34a278f2-5699-439a-842f-8a7e4f63351b", "azureml.promptflow.total_tokens": + "844"}, "actionUris": {}, "duration": null, "durationMilliseconds": null}, + "internal": {}, "updateSequence": 4, "type": "runs", "version": null, "entityContainerId": + "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId": "af52ab5f-43fb-42b4-b20d-9ab2a4759ab7", + "resourceType": "Workspace", "relationships": []}, {"relevancyScore": 0.28867513, + "entityResourceName": "promptflow-eastus", "highlights": {}, "usage": {"totalCount": + 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/bb0fd38b-fa62-47b2-9422-32d1ab7e6138", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "web_classification", "status": "Running", "primaryMetricName": null, "estimatedCost": + null, "primaryMetricSummary": null, "metrics": {}, "parameters": {}, "settings": + {}, "modifiedTime": "2023-10-18T08:48:27.1215315Z", "retainForLifetimeOfWorkspace": + false, "error": {"code": null, "errorCodeHierarchy": null, "message": null, + "time": null, "componentName": null, "severity": null, "detailsUri": null, + "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + null, "computeDurationMilliseconds": null, "effectiveStartTimeUtc": "2023-10-18T08:48:27.0893551Z", + "name": null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:12.5781692+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.web_classification_variant_0_20231018_084756_169040", + "targetName": null, "runName": null, "experimentName": "web_classification", + "runId": "web_classification_variant_0_20231018_084756_169040", "parentRunId": + null, "rootRunId": "web_classification_variant_0_20231018_084756_169040", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "runUuid": "bb0fd38b-fa62-47b2-9422-32d1ab7e6138", + "parentRunUuid": null, "runNumber": 1697618892, "startTime": "2023-10-18T08:48:27.0893551Z", + "endTime": null, "computeRequest": null, "compute": null, "userProperties": + {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "37910a55386b92933577982c8558805024b3e24e914d3c4c", + "azureml.promptflow.flow_lineage_id": "5743a4e13073efb15fa579cd8c5fc9b7572b8cf39c12ce19631c26d3290ccca5", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/0bdd98f38bda96f0376199542e83c4c4/web_classification/flow.dag.yaml", + 
"azureml.promptflow.input_data": "azureml:webClassification1:1", "azureml.promptflow.inputs_mapping": + "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": "d7ca6d8f-c8a7-42d5-adf1-cbd71ace4b66"}, + "actionUris": {}, "duration": null, "durationMilliseconds": null}, "internal": + {}, "updateSequence": 3, "type": "runs", "version": null, "entityContainerId": + "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId": "bb0fd38b-fa62-47b2-9422-32d1ab7e6138", + "resourceType": "Workspace", "relationships": []}, {"relevancyScore": 0.28867513, + "entityResourceName": "promptflow-eastus", "highlights": {}, "usage": {"totalCount": + 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/8a993e45-c5a0-4399-99bf-ee359c634999", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "web_classification", "status": "Completed", "primaryMetricName": null, "estimatedCost": + null, "primaryMetricSummary": null, "metrics": {}, "parameters": {}, "settings": + {}, "modifiedTime": "2023-10-18T08:48:36.4553904Z", "retainForLifetimeOfWorkspace": + false, "error": {"code": null, "errorCodeHierarchy": null, "message": null, + "time": null, "componentName": null, "severity": null, "detailsUri": null, + "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + "00:00:13.8039508", "computeDurationMilliseconds": 13803.9508, "effectiveStartTimeUtc": + null, "name": null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:04.3439593+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.web_classification_variant_0_20231018_084748_491499", + "targetName": null, "runName": null, "experimentName": "web_classification", + "runId": "web_classification_variant_0_20231018_084748_491499", "parentRunId": + null, "rootRunId": "web_classification_variant_0_20231018_084748_491499", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "runUuid": "8a993e45-c5a0-4399-99bf-ee359c634999", + "parentRunUuid": null, "runNumber": 1697618884, "startTime": "2023-10-18T08:48:22.590604Z", + "endTime": "2023-10-18T08:48:36.3945548Z", "computeRequest": null, "compute": + null, "userProperties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "37910a55386b92933577982c8558805024b3e24e914d3c4c", + "azureml.promptflow.flow_lineage_id": "5743a4e13073efb15fa579cd8c5fc9b7572b8cf39c12ce19631c26d3290ccca5", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": 
"LocalUpload/0bdd98f38bda96f0376199542e83c4c4/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/710002843bc088af1e61f18537317143/webClassification3.jsonl", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "32f3a2e5-3cc6-46af-8ac9-3fdc455cdd6a", "azureml.promptflow.total_tokens": + "2432"}, "actionUris": {}, "duration": "00:00:13.8039508", "durationMilliseconds": + 13803.9508}, "internal": {}, "updateSequence": 5, "type": "runs", "version": + null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId": + "8a993e45-c5a0-4399-99bf-ee359c634999", "resourceType": "Workspace", "relationships": + [{"relationType": "CreatedBy", "targetEntityId": null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_web_classification_variant_0_20231018_084748_491499_output_data_debug_info/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}, + {"relationType": "CreatedBy", "targetEntityId": null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_web_classification_variant_0_20231018_084748_491499_output_data_flow_outputs/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}]}, + {"relevancyScore": 0.28867513, "entityResourceName": "promptflow-eastus", + "highlights": {}, "usage": {"totalCount": 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", + "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/a8b6ff0b-9f38-4515-86b2-1a9ffbe54370", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "web_classification", "status": "Completed", "primaryMetricName": null, "estimatedCost": + null, "primaryMetricSummary": null, "metrics": {}, "parameters": {}, "settings": + {}, "modifiedTime": "2023-10-18T08:48:31.5112706Z", "retainForLifetimeOfWorkspace": + false, "error": {"code": null, "errorCodeHierarchy": null, "message": null, + "time": null, "componentName": null, "severity": null, "detailsUri": null, + "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + "00:00:12.9206439", "computeDurationMilliseconds": 12920.6439, "effectiveStartTimeUtc": + null, "name": null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:04.305341+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.e9a75865-cd6d-4e1e-8aba-2089646a058a", "targetName": + null, "runName": null, "experimentName": "web_classification", "runId": "e9a75865-cd6d-4e1e-8aba-2089646a058a", + "parentRunId": null, "rootRunId": "e9a75865-cd6d-4e1e-8aba-2089646a058a", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": 
"d30efbeb-f81d-4cfa-b5cc-a0570a049009", "runUuid": "a8b6ff0b-9f38-4515-86b2-1a9ffbe54370", + "parentRunUuid": null, "runNumber": 1697618884, "startTime": "2023-10-18T08:48:18.5496962Z", + "endTime": "2023-10-18T08:48:31.4703401Z", "computeRequest": null, "compute": + null, "userProperties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "37910a55386b92933577982c8558805024b3e24e914d3c4c", + "azureml.promptflow.flow_lineage_id": "5743a4e13073efb15fa579cd8c5fc9b7572b8cf39c12ce19631c26d3290ccca5", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/0bdd98f38bda96f0376199542e83c4c4/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/1d56ae643dfd9934c32fbb3ecf5a07f5/webClassification1.jsonl", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "0365b2ea-7fda-433f-90b1-9cda99d5b9fc", "azureml.promptflow.total_tokens": + "790"}, "actionUris": {}, "duration": "00:00:12.9206439", "durationMilliseconds": + 12920.6439}, "internal": {}, "updateSequence": 5, "type": "runs", "version": + null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "entityObjectId": + "a8b6ff0b-9f38-4515-86b2-1a9ffbe54370", "resourceType": "Workspace", "relationships": + [{"relationType": "CreatedBy", "targetEntityId": null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_e9a75865-cd6d-4e1e-8aba-2089646a058a_output_data_debug_info/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}, + {"relationType": "CreatedBy", "targetEntityId": null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_e9a75865-cd6d-4e1e-8aba-2089646a058a_output_data_flow_outputs/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}]}, + {"relevancyScore": 0.28867513, "entityResourceName": "promptflow-eastus", + "highlights": {}, "usage": {"totalCount": 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", + "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/08d48bee-16ad-4760-b0e0-c6ef73504623", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "flow_with_dict_input", "status": "Completed", "primaryMetricName": null, + "estimatedCost": null, "primaryMetricSummary": null, "metrics": {}, "parameters": + {}, "settings": {}, "modifiedTime": "2023-10-18T08:48:26.9958536Z", "retainForLifetimeOfWorkspace": + false, "error": {"code": null, "errorCodeHierarchy": null, "message": null, + "time": null, "componentName": null, "severity": null, "detailsUri": null, + "referenceCode": null}, "resourceMetricSummary": {"gpuUtilizationPercentLastHour": + null, "gpuMemoryUtilizationPercentLastHour": null, "gpuEnergyJoules": null, + "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": null, "chargedCpuMemoryMegabyteSeconds": + null, "chargedGpuSeconds": null, "chargedNodeUtilizationSeconds": null}, "computeDuration": + "00:00:03.7221597", "computeDurationMilliseconds": 3722.1597, "effectiveStartTimeUtc": + null, "name": null, "description": null}, 
"properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:04.0650589+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.flow_with_dict_input_variant_0_20231018_084753_740541", + "targetName": null, "runName": null, "experimentName": "flow_with_dict_input", + "runId": "flow_with_dict_input_variant_0_20231018_084753_740541", "parentRunId": + null, "rootRunId": "flow_with_dict_input_variant_0_20231018_084753_740541", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": "e3f25497-3a2f-4b85-9007-6878e87a4f82", "runUuid": "08d48bee-16ad-4760-b0e0-c6ef73504623", + "parentRunUuid": null, "runNumber": 1697618884, "startTime": "2023-10-18T08:48:23.2063475Z", + "endTime": "2023-10-18T08:48:26.9285072Z", "computeRequest": null, "compute": + null, "userProperties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "f25e17dfadfb728f7f55c1c689ff7bfcdc266c7cfbf77138", + "azureml.promptflow.flow_lineage_id": "59dd7ea905c541c8086ef906810da234dd72b68987cd5785f67f4eb13943b26b", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/6d71049bad682c8063c83b44ba260327/flow_with_dict_input/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/710002843bc088af1e61f18537317143/webClassification3.jsonl", + "azureml.promptflow.inputs_mapping": "{\"key\":\"{\\\"a\\\": 1}\",\"extra\":\"${data.url}\"}", + "azureml.promptflow.snapshot_id": "f751b3ef-9dab-43ed-a28b-264631745877", + "azureml.promptflow.total_tokens": "0"}, "actionUris": {}, "duration": "00:00:03.7221597", + "durationMilliseconds": 3722.1597}, "internal": {}, "updateSequence": 5, "type": + "runs", "version": null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "entityObjectId": "08d48bee-16ad-4760-b0e0-c6ef73504623", "resourceType": + "Workspace", "relationships": [{"relationType": "CreatedBy", "targetEntityId": + null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_flow_with_dict_input_variant_0_20231018_084753_740541_output_data_debug_info/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}, + {"relationType": "CreatedBy", "targetEntityId": null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_flow_with_dict_input_variant_0_20231018_084753_740541_output_data_flow_outputs/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}]}, + {"relevancyScore": 0.28867513, "entityResourceName": "promptflow-eastus", + "highlights": {}, "usage": {"totalCount": 0}, "schemaId": "974ab09e-bfc2-56a6-9be4-97bcfe3d33ca", + "entityId": "azureml://location/eastus/workspaceId/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/type/runs/objectId/763c0fd0-6292-4553-95e8-05b61d9db05a", + "kind": "Unversioned", "annotations": {"archived": false, "tags": {}, "displayName": + "classification_accuracy_evaluation", "status": "Failed", 
"primaryMetricName": + null, "estimatedCost": null, "primaryMetricSummary": null, "metrics": {}, + "parameters": {}, "settings": {}, "modifiedTime": "2023-10-18T08:48:25.2108678Z", + "retainForLifetimeOfWorkspace": false, "error": {"code": "UserError", "errorCodeHierarchy": + "UserError/ValidationError/InputMappingError", "message": "The input for batch + run is incorrect. Couldn''t find these mapping relations: ${data.variant_id}. + Please make sure your input mapping keys and values match your YAML input + section and input data. For more information, refer to the following documentation: + https://microsoft.github.", "time": "2023-10-18T08:48:24.890932Z", "componentName": + "promptflow-runtime/20231011.v2 Designer/1.0 promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (macOS-12.6.9-x86_64-i386-64bit) promptflow/0.1.0b8.dev2", + "severity": null, "detailsUri": null, "referenceCode": "Executor"}, "resourceMetricSummary": + {"gpuUtilizationPercentLastHour": null, "gpuMemoryUtilizationPercentLastHour": + null, "gpuEnergyJoules": null, "resourceMetricNames": null}, "jobCost": {"chargedCpuCoreSeconds": + null, "chargedCpuMemoryMegabyteSeconds": null, "chargedGpuSeconds": null, + "chargedNodeUtilizationSeconds": null}, "computeDuration": "00:00:01.8560273", + "computeDurationMilliseconds": 1856.0273, "effectiveStartTimeUtc": null, "name": + null, "description": null}, "properties": {"updatedTime": "0001-01-01T00:00:00+00:00", + "creationContext": {"createdTime": "2023-10-18T08:48:03.7385802+00:00", "createdBy": + {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "userTenantId": "00000000-0000-0000-0000-000000000000", + "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587"}, "creationSource": null}, + "dataContainerId": "dcid.classification_accuracy_evaluation_variant_0_20231018_084753_414377", + "targetName": null, "runName": null, "experimentName": "classification_accuracy_evaluation", + "runId": "classification_accuracy_evaluation_variant_0_20231018_084753_414377", + "parentRunId": null, "rootRunId": "classification_accuracy_evaluation_variant_0_20231018_084753_414377", + "runType": "azureml.promptflow.FlowRun", "runTypeV2": {"orchestrator": null, + "traits": {}, "attribution": "PromptFlow", "computeType": "MIR_v2"}, "scriptName": + null, "experimentId": "dd66dff5-aa1a-4674-83c3-0c347f3ad863", "runUuid": "763c0fd0-6292-4553-95e8-05b61d9db05a", + "parentRunUuid": null, "runNumber": 1697618883, "startTime": "2023-10-18T08:48:23.3213908Z", + "endTime": "2023-10-18T08:48:25.1774181Z", "computeRequest": null, "compute": + null, "userProperties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "a21932356f0b71b9dde77d1e084a90f5b3a3b71ce94d33ea", + "azureml.promptflow.flow_lineage_id": "a390805d6770539e1c086c09225e431dc077d346beacf3edb18e22e5bf6fd57f", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/a2a981c28f0b6b938c08c441a756cd0d/classification_accuracy_evaluation/flow.dag.yaml", + "azureml.promptflow.input_run_id": "web_classification_variant_0_20231018_084543_346602", + "azureml.promptflow.inputs_mapping": "{\"groundtruth\":\"${run.inputs.url}\",\"prediction\":\"${run.outputs.category}\"}", + "azureml.promptflow.snapshot_id": "da0df39a-5bb7-441b-97e1-85d32e5463f7", + "azureml.promptflow.total_tokens": 
"0"}, "actionUris": {}, "duration": "00:00:01.8560273", + "durationMilliseconds": 1856.0273}, "internal": {}, "updateSequence": 6, "type": + "runs", "version": null, "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "entityObjectId": "763c0fd0-6292-4553-95e8-05b61d9db05a", "resourceType": + "Workspace", "relationships": [{"relationType": "CreatedBy", "targetEntityId": + null, "assetId": "azureml://locations/eastus/workspaces/00000/data/azureml_classification_accuracy_evaluation_variant_0_20231018_084753_414377_output_data_debug_info/versions/1", + "entityType": "data", "direction": "Output", "entityContainerId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"}]}], + "nextSkip": 10, "continuationToken": "H4sIAAAAAAAAA1XOsQrCMBCA4Xe5OYPaQchqLXRQKukmDoc59fBMJLlApfTdzSQ4__Dxz3AqlD5OUSmDPc_gHph878HCoYiyUsCgYOBIk7onv8GuVwb6PAiG_UTXohxDGwOBvaFkMtCy_4sdsvziGBVlF0s1K9Q0281yMdBxyjrgnepI0pFfVQtFZPkCuAy9D6AAAAA", + "entityContainerIdsToEntityContainerMetadata": {"3e123da1-f9a5-4c91-9234-8d9ffbb39ff5": + {"resourceId": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "subscriptionId": "96aede12-2f73-41cb-b983-6d11a904839b", + "resourceGroup": "promptflow", "resourceName": "promptflow-eastus", "entityContainerType": + "Workspace", "regions": [{"regionName": "eastus", "isPrimaryRegion": true}], + "tenantId": "00000000-0000-0000-0000-000000000000", "immutableResourceId": + "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", "isPublicResource": false}}, "resourcesNotQueriedReasons": + {}, "numberOfEntityContainersNotQueried": 0, "fanoutData": {"Multitenant": + {"nextSkip": 10, "isShardDone": false, "didShardFail": false, "totalCount": + 103372, "resourceIdsOnShardThisPage": ["3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"]}}, + "regionalFanoutState": {"shardFanoutStates": [{"shardId": "Multitenant", "nextSkip": + 10, "isPlanExecutionDone": false, "didPlanExecutionFail": false, "totalCount": + 103372, "resourceIdsOnShardThisPage": ["3e123da1-f9a5-4c91-9234-8d9ffbb39ff5"]}], + "firstPageStartTime": null}, "shardErrors": {}, "canSupportSkip": true}' + headers: + connection: + - keep-alive + content-length: + - '40127' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.089' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_pf_run_with_env_var.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_pf_run_with_env_var.yaml new file mode 100644 index 00000000000..978cc185153 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_pf_run_with_env_var.yaml @@ -0,0 +1,98 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": 
"Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.023' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.097' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk.yaml new file mode 100644 index 00000000000..c9e1252994a --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk.yaml @@ -0,0 +1,856 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 
+ response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.021' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.141' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.211' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.088' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 07:51:56 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '127' + content-md5: + - i/8q1x5YKzHv3Fd/R8lYUQ== + content-type: + - application/octet-stream + last-modified: + - Fri, 28 Jul 2023 12:34:52 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 28 Jul 2023 12:34:52 GMT + x-ms-meta-name: + - 13fa99dd-c98e-4f2a-a704-4295d4ed6f68 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 0367c5c6-9f53-4a75-8623-7e53699f0d0b + x-ms-version: + - '2023-08-03' + status: + code: 
200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 07:51:57 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.109' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - 
'-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.090' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 07:52:00 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Mon, 28 Aug 2023 14:22:57 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Mon, 28 Aug 2023 14:22:57 GMT + x-ms-meta-name: + - f8d42f9b-ad14-4f6d-ad92-08c1b6de1b0d + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 07:52:01 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "nodeVariant": "${summarize_text_content.variant_0}", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl"}, + "inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables": + {}, "runtimeName": "demo-mir", "sessionId": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '814' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '20.601' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697615542, "rootRunId": "name", "createdUtc": + "2023-10-18T07:52:22.208754+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "e53783b1-16d2-4f07-87b5-f1540594e856", "parentRunUuid": + null, "rootRunUuid": "e53783b1-16d2-4f07-87b5-f1540594e856", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": 
"4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T07:52:22.208754+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId": + null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "status": "Preparing", + "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": + "web_classification", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/623c2a5b51c1eb9639ec4374ee09eaaa/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "f21c158e-5c37-4743-a43a-739202f29f1c"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + 
"Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}], "Tools": [{"Name": "Content Safety (Text + Analyze)", "Type": "python", "Inputs": {"connection": {"Name": null, "Type": + ["AzureContentSafetyConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "hate_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "sexual_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "violence_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure + Content Safety to detect harmful content.", "connection_type": null, "Module": + "content_safety_text.tools.content_safety_text_tool", "class_name": null, + "Source": null, "LkgCode": null, "Code": null, "Function": "analyze_text", + "action_type": null, "provider_config": null, 
"function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-contentsafety", + "package_version": "0.0.5"}, {"Name": "Embedding", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", + "Inputs": {"api": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["chat", "completion"], "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "connection": {"Name": null, "Type": ["CustomConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open 
Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": 
null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, 
"Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "classify_with_llm.jinja2", + "Type": "llm", "Inputs": {"examples": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "text_content": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, 
"Source": "classify_with_llm.jinja2", "LkgCode": null, + "Code": null, "Function": null, "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + false, "package": null, "package_version": null}, {"Name": "convert_to_dict.py", + "Type": "python", "Inputs": {"input_str": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "convert_to_dict.py", "LkgCode": null, "Code": + null, "Function": "convert_to_dict", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "prepare_examples.py", "Type": "python", "Inputs": null, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "prepare_examples.py", "LkgCode": null, "Code": null, "Function": + "prepare_examples", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}, {"Name": "summarize_text_content.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content.jinja2", "LkgCode": + null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "summarize_text_content__variant_1.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": 
null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}], "Codes": + null, "Inputs": {"url": {"Name": null, "Type": "string", "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40510' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.040' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_from_yaml.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_from_yaml.yaml new file mode 100644 index 00000000000..c186a909a10 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_from_yaml.yaml @@ -0,0 +1,856 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.018' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: 
'{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.090' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.083' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + 
Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.183' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:00:19 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '127' + content-md5: + - i/8q1x5YKzHv3Fd/R8lYUQ== + content-type: + - application/octet-stream + last-modified: + - Fri, 28 Jul 2023 12:34:52 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 28 Jul 2023 12:34:52 GMT + x-ms-meta-name: + - 13fa99dd-c98e-4f2a-a704-4295d4ed6f68 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 0367c5c6-9f53-4a75-8623-7e53699f0d0b + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:00:20 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.121' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.121' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:00:24 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - 
bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Mon, 28 Aug 2023 14:22:57 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Mon, 28 Aug 2023 14:22:57 GMT + x-ms-meta-name: + - f8d42f9b-ad14-4f6d-ad92-08c1b6de1b0d + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:00:25 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "run_id", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "nodeVariant": "${summarize_text_content.variant_0}", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl"}, + "inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables": + {"FOO": "BAR"}, "runtimeName": "demo-mir", "sessionId": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '826' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"run_id"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '12.319' + status: + code: 200 + message: OK +- request: + body: '{"runId": "run_id", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: 
https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697616037, "rootRunId": "run_id", "createdUtc": + "2023-10-18T08:00:37.1512869+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "5ecb494d-1e59-4ec3-9bbd-1dda4204d175", "parentRunUuid": + null, "rootRunUuid": "5ecb494d-1e59-4ec3-9bbd-1dda4204d175", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:00:37.1512869+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "run_id", + "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", + "status": "Preparing", "startTimeUtc": null, "endTimeUtc": null, "scheduleId": + null, "displayName": "web_classification", "name": null, "dataContainerId": + "dcid.run_id", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/623c2a5b51c1eb9639ec4374ee09eaaa/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "0147eab5-419e-4d46-b6d9-173d9919672e"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, 
"inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}], "Tools": [{"Name": "Content Safety (Text + Analyze)", "Type": "python", "Inputs": {"connection": {"Name": null, "Type": + ["AzureContentSafetyConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "hate_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, 
"dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "sexual_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "violence_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure + Content Safety to detect harmful content.", "connection_type": null, "Module": + "content_safety_text.tools.content_safety_text_tool", "class_name": null, + "Source": null, "LkgCode": null, "Code": null, "Function": "analyze_text", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-contentsafety", + "package_version": "0.0.5"}, {"Name": "Embedding", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, 
"function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", + "Inputs": {"api": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["chat", "completion"], "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "connection": {"Name": null, "Type": ["CustomConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": 
{"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, 
"allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based 
query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "classify_with_llm.jinja2", + "Type": "llm", "Inputs": {"examples": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "text_content": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "classify_with_llm.jinja2", "LkgCode": null, + "Code": null, "Function": null, "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + false, "package": null, "package_version": null}, {"Name": "convert_to_dict.py", + "Type": "python", "Inputs": {"input_str": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "convert_to_dict.py", "LkgCode": null, "Code": + null, "Function": "convert_to_dict", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "prepare_examples.py", "Type": "python", "Inputs": null, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "prepare_examples.py", "LkgCode": null, "Code": null, "Function": + 
"prepare_examples", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}, {"Name": "summarize_text_content.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content.jinja2", "LkgCode": + null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "summarize_text_content__variant_1.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}], "Codes": + null, "Inputs": {"url": {"Name": null, "Type": "string", "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40512' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.058' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_not_exist.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_not_exist.yaml new file mode 100644 index 00000000000..c6fcfe2d43e --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_not_exist.yaml @@ -0,0 +1,98 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.023' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.114' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_without_retry.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_without_retry.yaml new file mode 100644 index 00000000000..8a29ed05dbd --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_bulk_without_retry.yaml @@ -0,0 +1,98 @@ +interactions: +- request: + body: null + headers: + Accept: + - 
application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.030' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.474' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_data_not_provided.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_data_not_provided.yaml new file mode 100644 
index 00000000000..6598d3f1d89 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_data_not_provided.yaml @@ -0,0 +1,98 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.023' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.117' + status: + code: 200 + message: OK +version: 1 diff --git 
a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_display_name_with_macro.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_display_name_with_macro.yaml new file mode 100644 index 00000000000..13bba5e3357 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_display_name_with_macro.yaml @@ -0,0 +1,777 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.026' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - 
no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.142' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.146' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.126' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:42:00 GMT + x-ms-version: + - '2023-08-03' + 
method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:42:01 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.087' + status: + code: 200 + message: OK 
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.113' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:42:04 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '245' + content-md5: + - F+JA0a3CxcLYZ0ANRdlZbA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:10 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:09 GMT + x-ms-meta-name: + - 7eb4fee6-5edc-4ab3-905c-0a3a3c41d3a3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:42:05 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml", + "runId": "name", "runDisplayName": "my_display_name_${variant_id}_${timestamp}", + "runExperimentName": "print_env_var", "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl"}, + "inputsMapping": {}, "connections": {}, "environmentVariables": {"API_BASE": + "${azure_open_ai_connection.api_base}"}, "runtimeName": "demo-mir", "sessionId": + "62adccc385dd5d078797bdd0d2e1c55e120f3d5216885b81", "flowLineageId": "f1efdb93dcf9b3c17e246e7bcf0e2c7398d7bc289f8dd2c3d8f808eacc63c31f", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '799' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '11.925' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697618537, "rootRunId": "name", "createdUtc": + "2023-10-18T08:42:17.5544819+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "b1eabdc3-cf8c-47ce-b980-10a779ba0606", "parentRunUuid": + null, "rootRunUuid": "b1eabdc3-cf8c-47ce-b980-10a779ba0606", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": 
"4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:42:17.5544819+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId": + null, "experimentId": "6a87c3ae-5a75-4c5d-9eb9-5203b0062282", "status": "Preparing", + "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": + "my_display_name_variant_0_202310180842", "name": null, "dataContainerId": + "dcid.name", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "62adccc385dd5d078797bdd0d2e1c55e120f3d5216885b81", + "azureml.promptflow.flow_lineage_id": "f1efdb93dcf9b3c17e246e7bcf0e2c7398d7bc289f8dd2c3d8f808eacc63c31f", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/3360ae705933fb90bcd290241ca0ece9/print_env_var/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/24ae753309d7e36d73d1c9d7d2a03845/env_var_names.jsonl", + "azureml.promptflow.snapshot_id": "91217a43-a6f2-490f-8434-1a6032eb4c8c"}, + "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "print_env", "Type": "python", "Source": {"Type": "code", + "Tool": null, "Path": "print_env.py"}, "Inputs": {"key": "${inputs.key}"}, + "Tool": "print_env.py", "Reduce": false, "Comment": null, "Activate": null, + "Api": null, "Provider": null, "Connection": null, "Module": null}], "Tools": + [{"Name": "Content Safety (Text Analyze)", "Type": "python", "Inputs": {"connection": + {"Name": null, "Type": ["AzureContentSafetyConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": 
"medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5"}, {"Name": "Embedding", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", + "OpenAIConnection"], "Default": null, "Description": null, "Enum": null, "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "deployment_name": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["AzureOpenAIConnection"], "enabled_by_value": null, "model_list": ["text-embedding-ada-002", + "text-search-ada-doc-001", "text-search-ada-query-001"], "Capabilities": {"completion": + false, "chat_completion": false, "embeddings": true}, "dynamic_list": null, + "allow_manual_entry": false, "is_multi_select": false}, "input": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "model": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": + ["OpenAIConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Open AI''s embedding model to + create an embedding vector representing the input text.", "connection_type": + null, "Module": "promptflow.tools.embedding", "class_name": null, "Source": + null, "LkgCode": null, "Code": null, "Function": "embedding", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", "package_version": + "0.1.0b8"}, 
{"Name": "Open Source LLM", "Type": "custom_llm", "Inputs": {"api": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + ["chat", "completion"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CustomConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, 
"enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": 
null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", 
"Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "print_env.py", + "Type": "python", "Inputs": {"key": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "print_env.py", "LkgCode": null, "Code": null, + "Function": "get_env_var", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}], "Codes": null, "Inputs": {"key": {"Name": + null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false, "is_chat_history": null}}, "Outputs": {"output": {"Name": null, "Type": + "string", "Description": null, "Reference": "${print_env.output.value}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '30888' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.087' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_submission_exception.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_submission_exception.yaml new file mode 100644 index 00000000000..361c303e5b2 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_submission_exception.yaml @@ -0,0 +1,98 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.024' + status: + code: 200 + message: OK +- request: + body: null + 
headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.059' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_connection_overwrite.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_connection_overwrite.yaml new file mode 100644 index 00000000000..ee1bb291e5d --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_connection_overwrite.yaml @@ -0,0 +1,857 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - 
no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.023' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.106' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": 
"WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.431' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.077' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:35:58 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '127' + content-md5: + - i/8q1x5YKzHv3Fd/R8lYUQ== + content-type: + - application/octet-stream + last-modified: + - Fri, 28 Jul 2023 12:34:52 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 28 Jul 2023 12:34:52 GMT + x-ms-meta-name: + - 13fa99dd-c98e-4f2a-a704-4295d4ed6f68 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 0367c5c6-9f53-4a75-8623-7e53699f0d0b + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:35:59 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + 
status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.085' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.086' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:36:02 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 
+ response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Mon, 28 Aug 2023 14:22:57 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Mon, 28 Aug 2023 14:22:57 GMT + x-ms-meta-name: + - f8d42f9b-ad14-4f6d-ad92-08c1b6de1b0d + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:36:03 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "nodeVariant": "${summarize_text_content.variant_0}", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl"}, + "inputsMapping": {"url": "${data.url}"}, "connections": {"classify_with_llm": + {"connection": "azure_open_ai", "model": "gpt-3.5-turbo"}}, "environmentVariables": + {}, "runtimeName": "demo-mir", "sessionId": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '892' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '10.882' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: 
https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697618174, "rootRunId": "name", "createdUtc": + "2023-10-18T08:36:14.8711376+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "83981e04-73af-4cbe-bc52-ac6e0a7539a1", "parentRunUuid": + null, "rootRunUuid": "83981e04-73af-4cbe-bc52-ac6e0a7539a1", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:36:14.8711376+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId": + null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "status": "Preparing", + "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": + "web_classification", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/623c2a5b51c1eb9639ec4374ee09eaaa/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "04f055dd-68c7-4293-a480-26df22bce706"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, 
"inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}", + "model": "gpt-3.5-turbo"}, "Tool": "classify_with_llm.jinja2", "Reduce": false, + "Comment": null, "Activate": null, "Api": "completion", "Provider": "AzureOpenAI", + "Connection": "azure_open_ai", "Module": "promptflow.tools.aoai"}, {"Name": + "convert_to_dict", "Type": "python", "Source": {"Type": "code", "Tool": null, + "Path": "convert_to_dict.py"}, "Inputs": {"input_str": "${classify_with_llm.output}"}, + "Tool": "convert_to_dict.py", "Reduce": false, "Comment": null, "Activate": + null, "Api": null, "Provider": null, "Connection": null, "Module": null}, + {"Name": "summarize_text_content", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "summarize_text_content.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.2", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, + "Tool": "summarize_text_content.jinja2", "Reduce": false, "Comment": null, + "Activate": null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": + "azure_open_ai_connection", "Module": "promptflow.tools.aoai"}], "Tools": + [{"Name": "Content Safety (Text Analyze)", "Type": "python", "Inputs": {"connection": + {"Name": null, "Type": ["AzureContentSafetyConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, 
"Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5"}, {"Name": "Embedding", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", + "OpenAIConnection"], "Default": null, "Description": null, "Enum": null, "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "deployment_name": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["AzureOpenAIConnection"], "enabled_by_value": null, "model_list": ["text-embedding-ada-002", + "text-search-ada-doc-001", "text-search-ada-query-001"], "Capabilities": {"completion": + false, "chat_completion": false, "embeddings": true}, "dynamic_list": null, + "allow_manual_entry": false, "is_multi_select": false}, "input": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "model": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": + ["OpenAIConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Open AI''s embedding model to + create an embedding vector representing the input text.", "connection_type": + null, "Module": "promptflow.tools.embedding", "class_name": null, "Source": + null, "LkgCode": null, "Code": null, "Function": "embedding", "action_type": + null, "provider_config": 
null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", "package_version": + "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", "Inputs": {"api": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + ["chat", "completion"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CustomConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, 
"safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, 
"allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based 
query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "classify_with_llm.jinja2", + "Type": "llm", "Inputs": {"examples": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "text_content": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "classify_with_llm.jinja2", "LkgCode": null, + "Code": null, "Function": null, "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + false, "package": null, "package_version": null}, {"Name": "convert_to_dict.py", + "Type": "python", "Inputs": {"input_str": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "convert_to_dict.py", "LkgCode": null, "Code": + null, "Function": "convert_to_dict", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "prepare_examples.py", "Type": "python", "Inputs": null, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "prepare_examples.py", "LkgCode": null, "Code": null, "Function": + 
"prepare_examples", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}, {"Name": "summarize_text_content.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content.jinja2", "LkgCode": + null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "summarize_text_content__variant_1.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}], "Codes": + null, "Inputs": {"url": {"Name": null, "Type": "string", "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40537' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.052' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_env_overwrite.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_env_overwrite.yaml new file mode 100644 index 00000000000..6a397b40064 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_env_overwrite.yaml @@ -0,0 +1,777 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.027' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.108' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + 
string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.118' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.167' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:39:18 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '21' + content-md5: + - ydz/NwecDDs8TenF9yuf5w== + content-type: + - application/octet-stream + last-modified: + - Thu, 27 Jul 2023 09:10:42 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 27 Jul 2023 09:10:42 GMT + x-ms-meta-name: + - 7852e804-21ad-4c3d-8439-d59c0d9e6e49 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 429f6800-072f-4abb-9fb2-03d7a3874754 + x-ms-version: + - '2023-08-03' + status: 
+ code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:39:19 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/env_var_names.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.068' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + 
- '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.148' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:39:22 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '245' + content-md5: + - F+JA0a3CxcLYZ0ANRdlZbA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:10 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:09 GMT + x-ms-meta-name: + - 7eb4fee6-5edc-4ab3-905c-0a3a3c41d3a3 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:39:23 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/print_env_var/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/print_env_var/flow.dag.yaml", + "runId": "name", "runDisplayName": "print_env_var", "runExperimentName": "print_env_var", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/env_var_names.jsonl"}, + "inputsMapping": {}, "connections": {}, "environmentVariables": {"API_BASE": + "${azure_open_ai_connection.api_base}"}, "runtimeName": "demo-mir", "sessionId": + "62adccc385dd5d078797bdd0d2e1c55e120f3d5216885b81", "flowLineageId": "f1efdb93dcf9b3c17e246e7bcf0e2c7398d7bc289f8dd2c3d8f808eacc63c31f", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '770' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '11.780' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697618375, "rootRunId": "name", "createdUtc": + "2023-10-18T08:39:35.8197751+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "eb3810fa-cdb7-49dd-8d26-0bc1ab4c7c62", "parentRunUuid": + null, "rootRunUuid": "eb3810fa-cdb7-49dd-8d26-0bc1ab4c7c62", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, 
"lastModifiedUtc": "2023-10-18T08:39:35.8197751+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId": + null, "experimentId": "6a87c3ae-5a75-4c5d-9eb9-5203b0062282", "status": "Preparing", + "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": + "print_env_var", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "62adccc385dd5d078797bdd0d2e1c55e120f3d5216885b81", + "azureml.promptflow.flow_lineage_id": "f1efdb93dcf9b3c17e246e7bcf0e2c7398d7bc289f8dd2c3d8f808eacc63c31f", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/3360ae705933fb90bcd290241ca0ece9/print_env_var/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/24ae753309d7e36d73d1c9d7d2a03845/env_var_names.jsonl", + "azureml.promptflow.snapshot_id": "bb66fd99-3518-41e0-9f64-ad0b4db3a42f"}, + "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "print_env", "Type": "python", "Source": {"Type": "code", + "Tool": null, "Path": "print_env.py"}, "Inputs": {"key": "${inputs.key}"}, + "Tool": "print_env.py", "Reduce": false, "Comment": null, "Activate": null, + "Api": null, "Provider": null, "Connection": null, "Module": null}], "Tools": + [{"Name": "Content Safety (Text Analyze)", "Type": "python", "Inputs": {"connection": + {"Name": null, "Type": ["AzureContentSafetyConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "hate_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "sexual_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", 
"medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "violence_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure Content + Safety to detect harmful content.", "connection_type": null, "Module": "content_safety_text.tools.content_safety_text_tool", + "class_name": null, "Source": null, "LkgCode": null, "Code": null, "Function": + "analyze_text", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-contentsafety", "package_version": "0.0.5"}, {"Name": "Embedding", + "Type": "python", "Inputs": {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", + "OpenAIConnection"], "Default": null, "Description": null, "Enum": null, "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "deployment_name": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["AzureOpenAIConnection"], "enabled_by_value": null, "model_list": ["text-embedding-ada-002", + "text-search-ada-doc-001", "text-search-ada-query-001"], "Capabilities": {"completion": + false, "chat_completion": false, "embeddings": true}, "dynamic_list": null, + "allow_manual_entry": false, "is_multi_select": false}, "input": {"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "model": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "enabled_by": "connection", "enabled_by_type": + ["OpenAIConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Open AI''s embedding model to + create an embedding vector representing the input text.", "connection_type": + null, "Module": "promptflow.tools.embedding", "class_name": null, "Source": + null, "LkgCode": null, "Code": null, "Function": "embedding", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", "package_version": + "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", "Inputs": {"api": + {"Name": null, 
"Type": ["string"], "Default": null, "Description": null, "Enum": + ["chat", "completion"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CustomConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, 
"allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", 
"QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": 
null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "print_env.py", + "Type": "python", "Inputs": {"key": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "print_env.py", "LkgCode": null, "Code": null, + "Function": "get_env_var", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}], "Codes": null, "Inputs": {"key": {"Name": + null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false, "is_chat_history": null}}, "Outputs": {"output": {"Name": null, "Type": + "string", "Description": null, "Reference": "${print_env.output.value}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '30863' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.062' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_remote_data.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_remote_data.yaml new file mode 100644 index 00000000000..0b3304e8b06 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_with_remote_data.yaml @@ -0,0 +1,1306 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.033' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - 
keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.064' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/webClassification1/versions/1 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/webClassification1/versions/1", + "name": "1", "type": "Microsoft.MachineLearningServices/workspaces/00000/versions", + "properties": {"description": null, "tags": {}, "properties": {}, "isArchived": + false, "isAnonymous": false, "autoDeleteSetting": null, "dataUri": "azureml://subscriptions/00000000-0000-0000-0000-000000000000/resourcegroups/00000/workspaces/00000/datastores/workspaceblobstore/paths/LocalUpload/eda0bdf303d802b35c788f378fa379f6/webClassification1.jsonl", + "stage": null, "intellectualProperty": null, "dataType": "uri_file"}, "systemData": + {"createdAt": "2023-07-28T06:38:13.7134012+00:00", "createdBy": "Han Wang", + "createdByType": "User", "lastModifiedAt": "2023-07-28T06:38:13.7232364+00:00"}}' + headers: + cache-control: + - no-cache + content-length: + - '967' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + 
vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.069' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.075' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.116' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:44:51 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: 
https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Mon, 28 Aug 2023 14:22:57 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Mon, 28 Aug 2023 14:22:57 GMT + x-ms-meta-name: + - f8d42f9b-ad14-4f6d-ad92-08c1b6de1b0d + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:44:52 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name1", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "nodeVariant": "${summarize_text_content.variant_0}", + "batchDataInput": {"dataUri": "azureml:/subscriptions/96aede12-2f73-41cb-b983-6d11a904839b/resourceGroups/promptflow/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus/data/webClassification1/versions/1"}, + "inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables": + {}, "runtimeName": "demo-mir", "sessionId": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '892' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name1"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '12.494' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 
azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.118' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.116' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:45:13 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Mon, 28 Aug 2023 14:22:57 GMT + server: + - 
Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Mon, 28 Aug 2023 14:22:57 GMT + x-ms-meta-name: + - f8d42f9b-ad14-4f6d-ad92-08c1b6de1b0d + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 08:45:14 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name2", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "nodeVariant": "${summarize_text_content.variant_0}", + "batchDataInput": {"dataUri": "azureml:webClassification1:1"}, "inputsMapping": + {"url": "${data.url}"}, "connections": {}, "environmentVariables": {}, "runtimeName": + "demo-mir", "sessionId": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '727' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name2"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '10.531' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name1", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697618705, "rootRunId": "name1", "createdUtc": + "2023-10-18T08:45:05.8278888+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": 
"https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "9a9e21ec-d8cb-472c-9e87-2033f3cc83b7", "parentRunUuid": + null, "rootRunUuid": "9a9e21ec-d8cb-472c-9e87-2033f3cc83b7", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:45:05.8278888+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name1", + "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", + "status": "Preparing", "startTimeUtc": null, "endTimeUtc": null, "scheduleId": + null, "displayName": "web_classification", "name": null, "dataContainerId": + "dcid.name1", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/623c2a5b51c1eb9639ec4374ee09eaaa/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/data/webClassification1/versions/1", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "944cf479-04c1-419e-b54e-5fd27470b4cc"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + 
null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}], "Tools": [{"Name": "Content Safety (Text + Analyze)", "Type": "python", "Inputs": {"connection": {"Name": null, "Type": + ["AzureContentSafetyConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "hate_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "sexual_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, 
"Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "violence_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure + Content Safety to detect harmful content.", "connection_type": null, "Module": + "content_safety_text.tools.content_safety_text_tool", "class_name": null, + "Source": null, "LkgCode": null, "Code": null, "Function": "analyze_text", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-contentsafety", + "package_version": "0.0.5"}, {"Name": "Embedding", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", + "Inputs": {"api": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["chat", "completion"], "enabled_by": null, "enabled_by_type": + null, 
"enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "connection": {"Name": null, "Type": ["CustomConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a 
specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, 
"is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + 
"promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "classify_with_llm.jinja2", + "Type": "llm", "Inputs": {"examples": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "text_content": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "classify_with_llm.jinja2", "LkgCode": null, + "Code": null, "Function": null, "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + false, "package": null, "package_version": null}, {"Name": "convert_to_dict.py", + "Type": "python", "Inputs": {"input_str": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "convert_to_dict.py", "LkgCode": null, "Code": + null, "Function": "convert_to_dict", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "prepare_examples.py", "Type": "python", "Inputs": null, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "prepare_examples.py", "LkgCode": null, "Code": null, "Function": + "prepare_examples", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}, {"Name": "summarize_text_content.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, 
"enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content.jinja2", "LkgCode": + null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "summarize_text_content__variant_1.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}], "Codes": + null, "Inputs": {"url": {"Name": null, "Type": "string", "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40590' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.060' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name2", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697618725, "rootRunId": "name2", "createdUtc": + "2023-10-18T08:45:25.7112092+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, 
"warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "85efc124-ca96-4281-b418-59843a04e2cb", "parentRunUuid": + null, "rootRunUuid": "85efc124-ca96-4281-b418-59843a04e2cb", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:45:25.7112092+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name2", + "parentRunId": null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", + "status": "Preparing", "startTimeUtc": null, "endTimeUtc": null, "scheduleId": + null, "displayName": "web_classification", "name": null, "dataContainerId": + "dcid.name2", "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20231011.v2", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/623c2a5b51c1eb9639ec4374ee09eaaa/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml:webClassification1:1", "azureml.promptflow.inputs_mapping": + "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": "f8adfd0d-388e-487b-ae43-90f0827cb97c"}, + "parameters": {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", 
"temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}], "Tools": [{"Name": "Content Safety (Text + Analyze)", "Type": "python", "Inputs": {"connection": {"Name": null, "Type": + ["AzureContentSafetyConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "hate_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "sexual_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "violence_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", 
"medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure + Content Safety to detect harmful content.", "connection_type": null, "Module": + "content_safety_text.tools.content_safety_text_tool", "class_name": null, + "Source": null, "LkgCode": null, "Code": null, "Function": "analyze_text", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-contentsafety", + "package_version": "0.0.5"}, {"Name": "Embedding", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", + "Inputs": {"api": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["chat", "completion"], "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "connection": {"Name": null, "Type": ["CustomConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + 
null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": {"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, 
"enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + 
["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "classify_with_llm.jinja2", + "Type": "llm", "Inputs": {"examples": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "text_content": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, 
"enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "classify_with_llm.jinja2", "LkgCode": null, + "Code": null, "Function": null, "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + false, "package": null, "package_version": null}, {"Name": "convert_to_dict.py", + "Type": "python", "Inputs": {"input_str": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "convert_to_dict.py", "LkgCode": null, "Code": + null, "Function": "convert_to_dict", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "prepare_examples.py", "Type": "python", "Inputs": null, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "prepare_examples.py", "LkgCode": null, "Code": null, "Function": + "prepare_examples", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}, {"Name": "summarize_text_content.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content.jinja2", "LkgCode": + null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, 
"package_version": null}, {"Name": "summarize_text_content__variant_1.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}], "Codes": + null, "Inputs": {"url": {"Name": null, "Type": "string", "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40425' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.064' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_without_dump.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_without_dump.yaml new file mode 100644 index 00000000000..967adabf165 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_run_without_dump.yaml @@ -0,0 +1,856 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.024' + status: + 
code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.094' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + 
"2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.122' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.146' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:51:54 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '127' + content-md5: + - i/8q1x5YKzHv3Fd/R8lYUQ== + content-type: + - application/octet-stream + last-modified: + - Fri, 28 Jul 2023 12:34:52 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Fri, 28 Jul 2023 12:34:52 GMT + x-ms-meta-name: + - 13fa99dd-c98e-4f2a-a704-4295d4ed6f68 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 0367c5c6-9f53-4a75-8623-7e53699f0d0b + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:51:55 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification1.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.087' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.077' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:51:58 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + accept-ranges: + - 
bytes + content-length: + - '590' + content-md5: + - lO4oQJbXkB2KYDp3GfsrCg== + content-type: + - application/octet-stream + last-modified: + - Mon, 28 Aug 2023 14:22:57 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Mon, 28 Aug 2023 14:22:57 GMT + x-ms-meta-name: + - f8d42f9b-ad14-4f6d-ad92-08c1b6de1b0d + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 09:51:59 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/web_classification/classify_with_llm.jinja2 + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: '{"flowDefinitionDataStoreName": "workspaceblobstore", "flowDefinitionBlobPath": + "LocalUpload/000000000000000000000000000000000000/web_classification/flow.dag.yaml", + "runId": "name", "runDisplayName": "web_classification", "runExperimentName": + "web_classification", "nodeVariant": "${summarize_text_content.variant_0}", + "batchDataInput": {"dataUri": "azureml://datastores/workspaceblobstore/paths/LocalUpload/000000000000000000000000000000000000/webClassification1.jsonl"}, + "inputsMapping": {"url": "${data.url}"}, "connections": {}, "environmentVariables": + {}, "runtimeName": "demo-mir", "sessionId": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "flowLineageId": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "runDisplayNameGenerationType": "UserProvidedMacro"}' + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '814' + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/submit + response: + body: + string: '"name"' + headers: + connection: + - keep-alive + content-length: + - '38' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '10.242' + status: + code: 200 + message: OK +- request: + body: '{"runId": "name", "selectRunMetadata": true, "selectRunDefinition": true, + "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: 
https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1697622730, "rootRunId": "name", "createdUtc": + "2023-10-18T09:52:10.2787823+00:00", "createdBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "userId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", "token": null, + "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": 2, + "statusRevision": 1, "runUuid": "3594b761-b262-4ef1-a42c-3e6c286b984a", "parentRunUuid": + null, "rootRunUuid": "3594b761-b262-4ef1-a42c-3e6c286b984a", "lastStartTimeUtc": + null, "currentComputeTime": "00:00:00", "computeDuration": null, "effectiveStartTimeUtc": + null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T09:52:10.2787823+00:00", "duration": + null, "cancelationReason": null, "currentAttemptId": 1, "runId": "name", "parentRunId": + null, "experimentId": "d30efbeb-f81d-4cfa-b5cc-a0570a049009", "status": "Preparing", + "startTimeUtc": null, "endTimeUtc": null, "scheduleId": null, "displayName": + "web_classification", "name": null, "dataContainerId": "dcid.name", "description": + null, "hidden": false, "runType": "azureml.promptflow.FlowRun", "runTypeV2": + {"orchestrator": null, "traits": [], "attribution": "PromptFlow", "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20231011.v2", "azureml.promptflow.definition_file_name": "flow.dag.yaml", + "azureml.promptflow.session_id": "4dd8f4d5f44dfeb817d3438cf84bd739215d87afd9458597", + "azureml.promptflow.flow_lineage_id": "af1a6951de9be2ce13d3b58b23dbd8b6a0cd8fd4918ad9cb22b28fb8395fbcb0", + "azureml.promptflow.node_variant": "${summarize_text_content.variant_0}", + "azureml.promptflow.flow_definition_datastore_name": "workspaceblobstore", + "azureml.promptflow.flow_definition_blob_path": "LocalUpload/623c2a5b51c1eb9639ec4374ee09eaaa/web_classification/flow.dag.yaml", + "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/107bd3498e44deb2dccc53d2208d32b2/webClassification1.jsonl", + "azureml.promptflow.inputs_mapping": "{\"url\":\"${data.url}\"}", "azureml.promptflow.snapshot_id": + "17dad58f-b905-4733-8b0d-648b56000ebb"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, 
"inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "fetch_text_content_from_url", "Type": "python", "Source": + {"Type": "code", "Tool": null, "Path": "fetch_text_content_from_url.py"}, + "Inputs": {"fetch_url": "${inputs.url}"}, "Tool": "fetch_text_content_from_url.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "prepare_examples", "Type": + "python", "Source": {"Type": "code", "Tool": null, "Path": "prepare_examples.py"}, + "Inputs": {}, "Tool": "prepare_examples.py", "Reduce": false, "Comment": null, + "Activate": null, "Api": null, "Provider": null, "Connection": null, "Module": + null}, {"Name": "classify_with_llm", "Type": "llm", "Source": {"Type": "code", + "Tool": null, "Path": "classify_with_llm.jinja2"}, "Inputs": {"deployment_name": + "text-davinci-003", "suffix": "", "max_tokens": "128", "temperature": "0.1", + "top_p": "1.0", "logprobs": "", "echo": "False", "stop": "", "presence_penalty": + "0", "frequency_penalty": "0", "best_of": "1", "logit_bias": "", "url": "${inputs.url}", + "examples": "${prepare_examples.output}", "text_content": "${summarize_text_content.output}"}, + "Tool": "classify_with_llm.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}, {"Name": "convert_to_dict", "Type": "python", + "Source": {"Type": "code", "Tool": null, "Path": "convert_to_dict.py"}, "Inputs": + {"input_str": "${classify_with_llm.output}"}, "Tool": "convert_to_dict.py", + "Reduce": false, "Comment": null, "Activate": null, "Api": null, "Provider": + null, "Connection": null, "Module": null}, {"Name": "summarize_text_content", + "Type": "llm", "Source": {"Type": "code", "Tool": null, "Path": "summarize_text_content.jinja2"}, + "Inputs": {"deployment_name": "text-davinci-003", "suffix": "", "max_tokens": + "128", "temperature": "0.2", "top_p": "1.0", "logprobs": "", "echo": "False", + "stop": "", "presence_penalty": "0", "frequency_penalty": "0", "best_of": + "1", "logit_bias": "", "text": "${fetch_text_content_from_url.output}"}, "Tool": + "summarize_text_content.jinja2", "Reduce": false, "Comment": null, "Activate": + null, "Api": "completion", "Provider": "AzureOpenAI", "Connection": "azure_open_ai_connection", + "Module": "promptflow.tools.aoai"}], "Tools": [{"Name": "Content Safety (Text + Analyze)", "Type": "python", "Inputs": {"connection": {"Name": null, "Type": + ["AzureContentSafetyConnection"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "hate_category": {"Name": null, "Type": + ["string"], "Default": "medium_sensitivity", "Description": null, "Enum": + ["disable", "low_sensitivity", "medium_sensitivity", "high_sensitivity"], + "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, "model_list": + null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "self_harm_category": {"Name": null, "Type": ["string"], + "Default": "medium_sensitivity", "Description": null, "Enum": ["disable", + "low_sensitivity", "medium_sensitivity", "high_sensitivity"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, 
"dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "sexual_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "text": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "violence_category": {"Name": null, "Type": ["string"], "Default": "medium_sensitivity", + "Description": null, "Enum": ["disable", "low_sensitivity", "medium_sensitivity", + "high_sensitivity"], "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Azure + Content Safety to detect harmful content.", "connection_type": null, "Module": + "content_safety_text.tools.content_safety_text_tool", "class_name": null, + "Source": null, "LkgCode": null, "Code": null, "Function": "analyze_text", + "action_type": null, "provider_config": null, "function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-contentsafety", + "package_version": "0.0.5"}, {"Name": "Embedding", "Type": "python", "Inputs": + {"connection": {"Name": null, "Type": ["AzureOpenAIConnection", "OpenAIConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": "connection", "enabled_by_type": ["AzureOpenAIConnection"], + "enabled_by_value": null, "model_list": ["text-embedding-ada-002", "text-search-ada-doc-001", + "text-search-ada-query-001"], "Capabilities": {"completion": false, "chat_completion": + false, "embeddings": true}, "dynamic_list": null, "allow_manual_entry": false, + "is_multi_select": false}, "input": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "model": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["text-embedding-ada-002", "text-search-ada-doc-001", "text-search-ada-query-001"], + "enabled_by": "connection", "enabled_by_type": ["OpenAIConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Use Open + AI''s embedding model to create an embedding vector representing the input + text.", "connection_type": null, "Module": "promptflow.tools.embedding", "class_name": + null, "Source": null, "LkgCode": null, "Code": null, "Function": "embedding", + "action_type": null, "provider_config": null, 
"function_config": null, "Icon": + null, "Category": null, "Tags": null, "is_builtin": true, "package": "promptflow-tools", + "package_version": "0.1.0b8"}, {"Name": "Open Source LLM", "Type": "custom_llm", + "Inputs": {"api": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": ["chat", "completion"], "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "connection": {"Name": null, "Type": ["CustomConnection"], "Default": null, + "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "deployment_name": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "model_kwargs": {"Name": null, "Type": ["object"], + "Default": "{}", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Use an Open Source model from the Azure Model + catalog, deployed to an AzureML Online Endpoint for LLM Chat or Completion + API calls.", "connection_type": null, "Module": "promptflow.tools.open_source_llm", + "class_name": "OpenSourceLLM", "Source": null, "LkgCode": null, "Code": null, + "Function": "call", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Serp API", "Type": + "python", "Inputs": {"connection": {"Name": null, "Type": ["SerpConnection"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "engine": {"Name": null, "Type": ["string"], "Default": "google", "Description": + null, "Enum": ["google", "bing"], "enabled_by": null, "enabled_by_type": null, + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "location": + {"Name": null, "Type": ["string"], "Default": "", "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "num": {"Name": null, "Type": ["int"], "Default": + "10", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "query": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "safe": 
{"Name": null, "Type": ["string"], + "Default": "off", "Description": null, "Enum": ["active", "off"], "enabled_by": + null, "enabled_by_type": null, "enabled_by_value": null, "model_list": null, + "Capabilities": null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}}, "Outputs": null, "Description": "Use Serp API to obtain search results + from a specific search engine.", "connection_type": null, "Module": "promptflow.tools.serpapi", + "class_name": "SerpAPI", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-tools", "package_version": "0.1.0b8"}, {"Name": "Faiss Index Lookup", + "Type": "python", "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector": {"Name": null, "Type": ["list"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": "Search vector based query from the FAISS + index file.", "connection_type": null, "Module": "promptflow_vectordb.tool.faiss_index_lookup", + "class_name": "FaissIndexLookup", "Source": null, "LkgCode": null, "Code": + null, "Function": "search", "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + true, "package": "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": + "Vector DB Lookup", "Type": "python", "Inputs": {"class_name": {"Name": null, + "Type": ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "collection_name": {"Name": null, "Type": + ["string"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["QdrantConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "connection": {"Name": null, "Type": ["CognitiveSearchConnection", + "QdrantConnection", "WeaviateConnection"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "index_name": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, 
"allow_manual_entry": + false, "is_multi_select": false}, "search_filters": {"Name": null, "Type": + ["object"], "Default": null, "Description": null, "Enum": null, "enabled_by": + "connection", "enabled_by_type": ["CognitiveSearchConnection", "QdrantConnection"], + "enabled_by_value": null, "model_list": null, "Capabilities": null, "dynamic_list": + null, "allow_manual_entry": false, "is_multi_select": false}, "search_params": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": "connection", "enabled_by_type": ["CognitiveSearchConnection", + "QdrantConnection"], "enabled_by_value": null, "model_list": null, "Capabilities": + null, "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": + false}, "text_field": {"Name": null, "Type": ["string"], "Default": null, + "Description": null, "Enum": null, "enabled_by": "connection", "enabled_by_type": + ["CognitiveSearchConnection", "QdrantConnection", "WeaviateConnection"], "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "top_k": {"Name": null, "Type": ["int"], + "Default": "3", "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "vector": {"Name": null, "Type": ["list"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "vector_field": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": "connection", + "enabled_by_type": ["CognitiveSearchConnection"], "enabled_by_value": null, + "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + vector based query from existing Vector Database.", "connection_type": null, + "Module": "promptflow_vectordb.tool.vector_db_lookup", "class_name": "VectorDBLookup", + "Source": null, "LkgCode": null, "Code": null, "Function": "search", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": true, "package": "promptflow-vectordb", + "package_version": "0.0.1"}, {"Name": "Vector Index Lookup", "Type": "python", + "Inputs": {"path": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "query": {"Name": null, "Type": ["object"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "top_k": {"Name": null, "Type": ["int"], "Default": "3", "Description": null, + "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}}, "Outputs": null, "Description": "Search + text or vector based 
query from AzureML Vector Index.", "connection_type": + null, "Module": "promptflow_vectordb.tool.vector_index_lookup", "class_name": + "VectorIndexLookup", "Source": null, "LkgCode": null, "Code": null, "Function": + "search", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": true, "package": + "promptflow-vectordb", "package_version": "0.0.1"}, {"Name": "classify_with_llm.jinja2", + "Type": "llm", "Inputs": {"examples": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}, + "text_content": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "enabled_by_value": + null, "model_list": null, "Capabilities": null, "dynamic_list": null, "allow_manual_entry": + false, "is_multi_select": false}, "url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "classify_with_llm.jinja2", "LkgCode": null, + "Code": null, "Function": null, "action_type": null, "provider_config": null, + "function_config": null, "Icon": null, "Category": null, "Tags": null, "is_builtin": + false, "package": null, "package_version": null}, {"Name": "convert_to_dict.py", + "Type": "python", "Inputs": {"input_str": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "convert_to_dict.py", "LkgCode": null, "Code": + null, "Function": "convert_to_dict", "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "fetch_text_content_from_url.py", + "Type": "python", "Inputs": {"fetch_url": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "fetch_text_content_from_url.py", "LkgCode": + null, "Code": null, "Function": "fetch_text_content_from_url", "action_type": + null, "provider_config": null, "function_config": null, "Icon": null, "Category": + null, "Tags": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "prepare_examples.py", "Type": "python", "Inputs": null, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "prepare_examples.py", "LkgCode": null, "Code": null, "Function": + 
"prepare_examples", "action_type": null, "provider_config": null, "function_config": + null, "Icon": null, "Category": null, "Tags": null, "is_builtin": false, "package": + null, "package_version": null}, {"Name": "summarize_text_content.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content.jinja2", "LkgCode": + null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}, {"Name": "summarize_text_content__variant_1.jinja2", + "Type": "llm", "Inputs": {"text": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "enabled_by_value": null, "model_list": null, "Capabilities": null, + "dynamic_list": null, "allow_manual_entry": false, "is_multi_select": false}}, + "Outputs": null, "Description": null, "connection_type": null, "Module": null, + "class_name": null, "Source": "summarize_text_content__variant_1.jinja2", + "LkgCode": null, "Code": null, "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "Icon": null, "Category": null, "Tags": null, + "is_builtin": false, "package": null, "package_version": null}], "Codes": + null, "Inputs": {"url": {"Name": null, "Type": "string", "Default": "https://www.microsoft.com/en-us/d/xbox-wireless-controller-stellar-shift-special-edition/94fbjc7h0h6h", + "Description": null, "is_chat_input": false, "is_chat_history": null}}, "Outputs": + {"category": {"Name": null, "Type": "string", "Description": null, "Reference": + "${convert_to_dict.output.category}", "evaluation_only": false, "is_chat_output": + false}, "evidence": {"Name": null, "Type": "string", "Description": null, + "Reference": "${convert_to_dict.output.evidence}", "evaluation_only": false, + "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '40512' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.065' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_metrics.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_metrics.yaml new file mode 100644 index 00000000000..93e11a46e11 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_metrics.yaml @@ -0,0 +1,425 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.039' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.054' + status: + code: 200 + message: OK +- request: + body: '{}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/metric/v2.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/lastvalues + response: + body: + string: '{"value": 
[{"dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "name": "gpt_relevance.variant_1", "columns": {"gpt_relevance.variant_1": + "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar", "dataLocation": + null}, "namespace": null, "standardSchemaId": null, "value": [{"metricId": + "cc487258-b6e8-4ab3-9fe2-05c38d1e4854", "createdUtc": "2023-06-26T03:23:44.905+00:00", + "step": 0, "data": {"gpt_relevance.variant_1": 1.0}}]}, {"dataContainerId": + "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "name": "gpt_relevance.variant_0", + "columns": {"gpt_relevance.variant_0": "Double"}, "properties": {"uxMetricType": + "azureml.v1.scalar", "dataLocation": null}, "namespace": null, "standardSchemaId": + null, "value": [{"metricId": "988ef509-2eae-488f-bdef-3637ad2f53bc", "createdUtc": + "2023-06-26T03:23:45.387+00:00", "step": 0, "data": {"gpt_relevance.variant_0": + 1.0}}]}, {"dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "name": "gpt_relevance_pass_rate(%).variant_1", "columns": {"gpt_relevance_pass_rate(%).variant_1": + "Double"}, "properties": {"uxMetricType": "azureml.v1.scalar", "dataLocation": + null}, "namespace": null, "standardSchemaId": null, "value": [{"metricId": + "2847628d-f09f-4ad5-9d83-30d9f7b49d64", "createdUtc": "2023-06-26T03:23:45.941+00:00", + "step": 0, "data": {"gpt_relevance_pass_rate(%).variant_1": 0.0}}]}, {"dataContainerId": + "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "name": "gpt_relevance_pass_rate(%).variant_0", + "columns": {"gpt_relevance_pass_rate(%).variant_0": "Double"}, "properties": + {"uxMetricType": "azureml.v1.scalar", "dataLocation": null}, "namespace": + null, "standardSchemaId": null, "value": [{"metricId": "a7386e26-1aac-464e-84f0-c1cdaaa4a44d", + "createdUtc": "2023-06-26T03:23:46.325+00:00", "step": 0, "data": {"gpt_relevance_pass_rate(%).variant_0": + 0.0}}]}]}' + headers: + connection: + - keep-alive + content-length: + - '2569' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.174' + status: + code: 200 + message: OK +- request: + body: '{"runId": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "selectRunMetadata": + true, "selectRunDefinition": true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1687749782, "rootRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "createdUtc": "2023-06-26T03:23:02.3017884+00:00", "createdBy": {"userObjectId": + "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "userPuId": "100320007E5EB49B", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Zhen + Ruan", "upn": null}, "userId": "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "token": + null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": + 81, "statusRevision": 2, "runUuid": "fab99b4a-07c2-4c77-8ee3-3d3a47102b4a", + "parentRunUuid": 
"1b1db1ed-46ed-4106-bf87-52e3c423b395", "rootRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:41.1688602", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:54:28.0243731+00:00", "duration": + "00:00:41.1688602", "cancelationReason": null, "currentAttemptId": 1, "runId": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "parentRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "experimentId": "ad8dde3a-73f1-49bc-ae71-c7c9b2dc1b9f", "status": "Completed", + "startTimeUtc": "2023-06-26T03:23:05.7657269+00:00", "endTimeUtc": "2023-06-26T03:23:46.9345871+00:00", + "scheduleId": null, "displayName": "test_display_name_aa300d92-c038-4c1b-a91a-a92cba8c36ff", + "name": null, "dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "description": "test_description_aa300d92-c038-4c1b-a91a-a92cba8c36ff", "hidden": + false, "runType": "azureml.promptflow.EvaluationRun", "runTypeV2": {"orchestrator": + null, "traits": [], "attribution": null, "computeType": "MIR_v2"}, "properties": + {"azureml.promptflow.flow_id": "QnARelevanceEvaluation", "azureml.promptflow.flow_name": + "QnA Relevance Evaluation", "azureml.promptflow.flow_type": "Evaluation", + "azureml.promptflow.source_flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "azureml.promptflow.baseline_variant_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "azureml.promptflow.variant_ids": "", "azureml.promptflow.bulk_test_id": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "azureml.promptflow.flow_experiment_id": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20230614.v1", "azureml.promptflow.total_tokens": "26680"}, "parameters": + {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {"test_tag": "aa300d92-c038-4c1b-a91a-a92cba8c36ff", "hod": "1"}, + "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": [], + "runDefinition": null, "jobSpecification": null, "primaryMetricName": null, + "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + null, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "relevance_score", "Tool": "compute_relevance_score", + "Comment": null, "Inputs": {"question": "${flow.question}", "context": "${flow.context}", + "answer": "${flow.answer}", "max_tokens": "256", "deployment_name": "gpt-35-turbo", + "temperature": "0.0"}, "Api": "chat", "Provider": "AzureOpenAI", "Connection": + "azure_open_ai_connection", "Module": "promptflow.tools.aoai", "Reduce": false}, + {"Name": "concat_scores", "Tool": "concat_results", "Comment": null, "Inputs": + {"relevance_score": "${relevance_score.output}"}, "Api": null, "Provider": + null, "Connection": null, "Module": null, "Reduce": false}, {"Name": "aggregate_variants_results", + "Tool": "aggregate_variants_results", "Comment": null, "Inputs": {"results": + "${concat_scores.output}", "line_number": 
"${flow.line_number}", "variant_id": + "${flow.variant_id}"}, "Api": null, "Provider": null, "Connection": null, + "Module": null, "Reduce": true}], "Tools": [{"Name": "compute_relevance_score", + "Type": "llm", "Inputs": {"context": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "question": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "answer": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}}, "Outputs": null, "Description": "This is a llm + tool", "connection_type": null, "Module": null, "class_name": null, "Source": + "compute_relevance_score.jinja2", "LkgCode": null, "Code": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "concat_results", "Type": "python", "Inputs": {"relevance_score": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "concat_results.py", "LkgCode": null, "Code": "from promptflow + import tool\nimport numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: + str):\n\n load_list = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "Function": + "concat_results", "action_type": null, "provider_config": null, "function_config": + null, "is_builtin": false, "package": null, "package_version": null}, {"Name": + "aggregate_variants_results", "Type": "python", "Inputs": {"variant_id": {"Name": + null, "Type": ["object"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "model_list": null}, "line_number": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}, "results": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "aggregate_variants_results.py", "LkgCode": null, "Code": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in 
range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n", "Function": "aggregate_variants_results", "action_type": + null, "provider_config": null, "function_config": null, "is_builtin": false, + "package": null, "package_version": null}], "Codes": {"compute_relevance_score.jinja2": + "System:\nYou are an AI assistant. You will be given the definition of an + evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "concat_results.py": "from promptflow import tool\nimport + numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: str):\n\n load_list + = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "aggregate_variants_results.py": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n"}, "Inputs": {"question": {"Name": null, "Type": "string", + "Default": null, "Description": null, "is_chat_input": false}, "context": + {"Name": null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false}, "answer": {"Name": null, "Type": "string", "Default": null, "Description": + null, "is_chat_input": false}, "line_number": {"Name": null, "Type": "int", + "Default": null, "Description": null, "is_chat_input": false}, "variant_id": + {"Name": null, "Type": "string", "Default": 
null, "Description": null, "is_chat_input": + false}}, "Outputs": {"gpt_relevance": {"Name": null, "Type": "object", "Description": + null, "Reference": "${concat_scores.output.gpt_relevance}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '21284' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.053' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run.yaml new file mode 100644 index 00000000000..d5ab5d59bff --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run.yaml @@ -0,0 +1,207 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.023' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + 
"containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.077' + status: + code: 200 + message: OK +- request: + body: '{"runId": "classification_accuracy_eval_default_20230808_153241_422491", + "selectRunMetadata": true, "selectRunDefinition": true, "selectJobSpecification": + true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '160' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1691479972, "rootRunId": "classification_accuracy_eval_default_20230808_153241_422491", + "createdUtc": "2023-08-08T07:32:52.7610302+00:00", "createdBy": {"userObjectId": + "c05e0746-e125-4cb3-9213-a8b535eacd79", "userPuId": "10032000324F7449", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Honglin + Du", "upn": null}, "userId": "c05e0746-e125-4cb3-9213-a8b535eacd79", "token": + null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": + 4, "statusRevision": 2, "runUuid": "59a146b3-caca-4313-8e22-04385a10067b", + "parentRunUuid": null, "rootRunUuid": "59a146b3-caca-4313-8e22-04385a10067b", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:11.2161606", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "74013e41-d17e-462a-8db6-5c0e26c0368c", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "ec6824af-81f6-47fa-a07e-5a04ff0b94e7", + "upn": null}, "lastModifiedUtc": "2023-08-08T07:33:07.3638259+00:00", "duration": + "00:00:11.2161606", "cancelationReason": null, "currentAttemptId": 1, "runId": + "classification_accuracy_eval_default_20230808_153241_422491", "parentRunId": + null, "experimentId": "7238c773-2cc8-4158-832b-27fb908231e4", "status": "Completed", + "startTimeUtc": "2023-08-08T07:32:56.6377618+00:00", "endTimeUtc": "2023-08-08T07:33:07.8539224+00:00", + "scheduleId": null, "displayName": "classification_accuracy_eval_default_20230808_153241_422491", + "name": null, "dataContainerId": "dcid.classification_accuracy_eval_default_20230808_153241_422491", + "description": null, "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": 
null, "traits": [], "attribution": "PromptFlow", + "computeType": "MIR_v2"}, "properties": {"azureml.promptflow.runtime_name": + "demo-mir", "azureml.promptflow.runtime_version": "20230801.v1", "azureml.promptflow.definition_file_name": + "flow.dag.yaml", "azureml.promptflow.input_data": "azureml://datastores/workspaceblobstore/paths/LocalUpload/312cca2af474e5f895013392b6b38f45/data.jsonl", + "azureml.promptflow.input_run_id": "web_classification_default_20230804_143634_056856", + "azureml.promptflow.inputs_mapping": "{\"groundtruth\":\"${data.answer}\",\"prediction\":\"${run.outputs.category}\"}", + "azureml.promptflow.snapshot_id": "e5d50c43-7ad2-4354-9ce4-4f56f0ea9a30", + "azureml.promptflow.total_tokens": "0"}, "parameters": {}, "actionUris": {}, + "scriptName": null, "target": null, "uniqueChildRunComputeTargets": [], "tags": + {}, "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": + [], "runDefinition": null, "jobSpecification": null, "primaryMetricName": + null, "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + false, "queueingInfo": null, "inputs": null, "outputs": {"flow_outputs": {"assetId": + "azureml://locations/eastus/workspaces/00000/data/azureml_classification_accuracy_eval_default_20230808_153241_422491_output_data_flow_outputs/versions/1", + "type": "UriFolder"}}}, "runDefinition": {"Nodes": [{"Name": "grade", "Tool": + "grade.py", "Comment": null, "Inputs": {"groundtruth": "${inputs.groundtruth}", + "prediction": "${inputs.prediction}"}, "Api": null, "Provider": null, "Connection": + null, "Module": null, "Reduce": false}, {"Name": "calculate_accuracy", "Tool": + "calculate_accuracy.py", "Comment": null, "Inputs": {"grades": "${grade.output}"}, + "Api": null, "Provider": null, "Connection": null, "Module": null, "Reduce": + true}], "Tools": [{"Name": "grade.py", "Type": "python", "Inputs": {"groundtruth": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null, "Capabilities": + null}, "prediction": {"Name": null, "Type": ["string"], "Default": null, "Description": + null, "Enum": null, "enabled_by": null, "enabled_by_type": null, "model_list": + null, "Capabilities": null}}, "Outputs": null, "Description": null, "connection_type": + null, "Module": null, "class_name": null, "Source": "grade.py", "LkgCode": + null, "Code": null, "Function": "grade", "action_type": null, "provider_config": + null, "function_config": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "calculate_accuracy.py", "Type": "python", "Inputs": {"grades": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null, "Capabilities": + null}}, "Outputs": null, "Description": null, "connection_type": null, "Module": + null, "class_name": null, "Source": "calculate_accuracy.py", "LkgCode": null, + "Code": null, "Function": "calculate_accuracy", "action_type": null, "provider_config": + null, "function_config": null, "is_builtin": false, "package": null, "package_version": + null}], "Codes": null, "Inputs": {"groundtruth": {"Name": null, "Type": "string", + "Default": "APP", "Description": "Please specify the groundtruth column, which + contains the true label to the outputs that your flow produces.", "is_chat_input": + false}, "prediction": {"Name": null, 
"Type": "string", "Default": "APP", "Description": + "Please specify the prediction column, which contains the predicted outputs + that your flow produces.", "is_chat_input": false}}, "Outputs": {"grade": + {"Name": null, "Type": "string", "Description": null, "Reference": "${grade.output}", + "evaluation_only": false, "is_chat_output": false}}}, "jobSpecification": + null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '7640' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.077' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run_details.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run_details.yaml new file mode 100644 index 00000000000..e955593bbdc --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_show_run_details.yaml @@ -0,0 +1,16837 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.073' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": 
"00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.065' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=24&startIndex=0 + response: + body: + string: '[{"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_3", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 1}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.920659Z", "end_time": "2023-06-26T03:23:06.631Z", + "index": 3, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.088949, + "end_time": 1687749786.513045, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gB0Uro7MNo7VLsobEl6C6wuNA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.106145, + "end_time": 1687749786.512756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.571879, "end_time": + 1687749786.579389, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.710341}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_9", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926054Z", "end_time": "2023-06-26T03:23:07.491344Z", + "index": 9, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.563752, "end_time": 1687749787.442194, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gycy6JsyndYj9QfjjLiAEGf8T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.569262, "end_time": 1687749787.441872, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.467644, "end_time": 1687749787.468533, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.56529}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_8", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925607Z", "end_time": "2023-06-26T03:23:07.184171Z", + "index": 8, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.090034, "end_time": 1687749786.987327, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6ggEskcWg2m6BxwHeI7ydD9us1", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.103779, "end_time": 1687749786.987023, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.014282, "end_time": 1687749787.019241, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.258564}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_7", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925055Z", "end_time": "2023-06-26T03:23:06.628877Z", + "index": 7, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.027456, + "end_time": 1687749786.410611, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gQx365BciPgdTHS3Z8SSo5Jy3", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.032612, + "end_time": 1687749786.410347, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.583318, "end_time": + 1687749786.589686, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.703822}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_6", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.924371Z", "end_time": "2023-06-26T03:23:06.719055Z", + "index": 6, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. 
You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.24682, + "end_time": 1687749786.58161, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gAhrgKQqaLHoJ0uSsSwjqUAeJ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.251809, + "end_time": 1687749786.581202, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.635584, "end_time": + 1687749786.636154, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.794684}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_5", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.923074Z", "end_time": "2023-06-26T03:23:07.072135Z", + "index": 5, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.562227, + "end_time": 1687749786.977697, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gfJnwJYsJFa0B4vOO4NWH4AKQ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.576267, + "end_time": 1687749786.977416, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.02569, "end_time": + 1687749787.026418, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 1.149061}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_4", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.921433Z", "end_time": "2023-06-26T03:23:06.614149Z", + "index": 4, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749785.973467, + "end_time": 1687749786.400667, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gkBCHI23Fh87kS0mEAbGHx9pA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749785.979026, + "end_time": 1687749786.400304, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.55911, "end_time": + 1687749786.578687, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.692716}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_24", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 12}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.024507Z", + "end_time": "2023-06-26T03:23:07.438615Z", "index": 24, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.075877, + "end_time": 1687749787.397734, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hiNBgRAR9tZEF3Tf91w5ZguDJ", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.082731, + "end_time": 1687749787.397233, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.419773, "end_time": + 1687749787.420229, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.414108}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_23", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.795688Z", "end_time": "2023-06-26T03:23:07.892902Z", + "index": 23, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.81345, "end_time": 1687749787.847112, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gwpzAz1GBJO3KzooXGXPZmdUZ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.817235, "end_time": 1687749787.84674, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.871403, "end_time": 1687749787.872944, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.097214}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_22", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.763166Z", "end_time": "2023-06-26T03:23:07.750976Z", + "index": 22, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.783179, "end_time": 1687749787.707845, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzVFblcoeGceLtun2KQAVbQCp", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.786329, "end_time": 1687749787.70744, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.729795, "end_time": 1687749787.731401, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 0.98781}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_21", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.710217Z", + "end_time": "2023-06-26T03:23:07.173797Z", "index": 21, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.767366, + "end_time": 1687749787.086178, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gc4Hz8Rcbao6ddOYwPf2MJNEN", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.770944, + "end_time": 1687749787.08577, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.126275, "end_time": + 1687749787.131026, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.46358}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_20", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.668734Z", + "end_time": "2023-06-26T03:23:07.226807Z", "index": 20, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. 
You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.717613, + "end_time": 1687749787.052258, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gGlPv3f7BMy4pTdwkcNBo45GE", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.750599, + "end_time": 1687749787.051899, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.149692, "end_time": + 1687749787.15838, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.558073}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_2", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 1}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918964Z", "end_time": "2023-06-26T03:23:07.077312Z", + "index": 2, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question.", "start_time": 1687749786.165309, "end_time": 1687749787.009966, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g66RvUMNAwHOWEa9HaLvWlvcl", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question."}}], "usage": {"completion_tokens": 20, "prompt_tokens": 660, + "total_tokens": 680}}, "start_time": 1687749786.169575, "end_time": 1687749787.009569, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer + cannot be evaluated for relevance as it does not provide any information related + to the context or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.045358, "end_time": 1687749787.049266, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 680, + "duration": 1.158348}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_19", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.667555Z", "end_time": "2023-06-26T03:23:07.177107Z", + "index": 19, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.738953, + "end_time": 1687749787.088244, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g1JmBKhhOfuxjJuPHEC9NV6Va", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.759801, + "end_time": 1687749787.087893, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.145433, "end_time": + 1687749787.147615, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.509552}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_18", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.666035Z", "end_time": "2023-06-26T03:23:07.092812Z", + "index": 18, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.730346, + "end_time": 1687749786.9731, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gLeJuHSbEbXyG94ax9QM9Wah7", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.752874, + "end_time": 1687749786.972756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.008476, "end_time": + 1687749787.018592, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.426777}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_17", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.662427Z", + "end_time": "2023-06-26T03:23:07.133144Z", "index": 17, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.735989, + "end_time": 1687749787.016051, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxaAvoFFwCNBjQXFg5eFxaJjT", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.757651, + "end_time": 1687749787.015671, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.048912, "end_time": + 1687749787.050452, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.470717}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_16", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.655694Z", + "end_time": "2023-06-26T03:23:07.122365Z", "index": 16, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.70908, + "end_time": 1687749787.031387, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzTY9fkgqmmnnH5buLGLyEqgk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.744715, + "end_time": 1687749787.03083, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.069732, "end_time": + 1687749787.073404, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.466671}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_15", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928891Z", "end_time": "2023-06-26T03:23:06.773681Z", + "index": 15, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.233921, + "end_time": 1687749786.611735, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gUVd9GNeRfLuj68DVi8haNkdC", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.237554, + "end_time": 1687749786.611181, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.739266, "end_time": + 1687749786.741858, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.84479}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_14", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928427Z", "end_time": "2023-06-26T03:23:06.616904Z", + "index": 14, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.069216, + "end_time": 1687749786.432629, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g01pwPNajVfkyrMuJtBWiFy4m", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.073157, + "end_time": 1687749786.432281, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.565565, "end_time": + 1687749786.579587, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.688477}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_13", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.928071Z", + "end_time": "2023-06-26T03:23:06.633826Z", "index": 13, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.139509, + "end_time": 1687749786.532064, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6goLHWskNPhCcmi2Hgir2KXeSk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.145399, + "end_time": 1687749786.531745, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.598533, "end_time": + 1687749786.599522, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.705755}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_12", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.927162Z", + "end_time": "2023-06-26T03:23:44.779703Z", "index": 12, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.256038, + "end_time": 1687749824.734329, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": null, + "start_time": 1687749786.260484, "end_time": 1687749816.331631, "error": {"message": + "Request timed out: HTTPSConnectionPool(host=''gpt-test-eus.openai.azure.com'', + port=443): Read timed out. (read timeout=30)", "type": "Timeout"}, "children": + null, "node_name": null}, {"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. 
You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX7IrXJ70uqylcKBlhD0QOKjkcIk", "object": "chat.completion", "created": + 1687749824, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749824.367835, + "end_time": 1687749824.733865, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749824.760058, "end_time": + 1687749824.760631, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 38.852541}, "result": {"gpt_relevance": + 1.0}, "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_11", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.92671Z", "end_time": "2023-06-26T03:23:07.021941Z", + "index": 11, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749785.950494, "end_time": 1687749786.964181, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gaYSKfcn3vP5vmKnoZSD1kt5T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749785.965934, "end_time": 1687749786.963842, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.995692, "end_time": 1687749786.996377, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.095231}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_10", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926425Z", "end_time": "2023-06-26T03:23:07.028667Z", + "index": 10, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.081766, "end_time": 1687749786.962387, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g9wmlJ75NqtqQzYbdFHhVW80Y", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.091624, "end_time": 1687749786.961904, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.994394, "end_time": 1687749786.996118, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.102242}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_1", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 0}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918425Z", "end_time": "2023-06-26T03:23:07.00271Z", + "index": 1, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + as it does not provide any information related to the context or question.", + "start_time": 1687749786.05704, "end_time": 1687749786.943219, "error": null, + "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gsQBSiT1GGMYJ2iZT3cKraIjX", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + as it does not provide any information related to the context or question."}}], + "usage": {"completion_tokens": 18, "prompt_tokens": 660, "total_tokens": 678}}, + "start_time": 1687749786.063365, "end_time": 1687749786.942812, "error": null, + "children": null, "node_name": null}], "node_name": "relevance_score"}, {"name": + "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer cannot + be evaluated as it does not provide any information related to the context + or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.974267, "end_time": 1687749786.97596, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 678, + "duration": 1.084285}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_0", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 0}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918011Z", "end_time": "2023-06-26T03:23:06.652333Z", + "index": 0, 
"api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.144201, + "end_time": 1687749786.466651, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxIBjW27eCGjDfHbh1BVDDVZS", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.152072, + "end_time": 1687749786.466318, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.595585, "end_time": + 1687749786.598282, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.734322}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '218093' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.458' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=24&startIndex=0 + response: + body: + string: '[{"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_3", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 1}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + 
"4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.920659Z", "end_time": "2023-06-26T03:23:06.631Z", + "index": 3, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.088949, + "end_time": 1687749786.513045, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gB0Uro7MNo7VLsobEl6C6wuNA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.106145, + "end_time": 1687749786.512756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.571879, "end_time": + 1687749786.579389, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.710341}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_9", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926054Z", "end_time": "2023-06-26T03:23:07.491344Z", + "index": 9, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.563752, "end_time": 1687749787.442194, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gycy6JsyndYj9QfjjLiAEGf8T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.569262, "end_time": 1687749787.441872, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.467644, "end_time": 1687749787.468533, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.56529}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_8", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925607Z", "end_time": "2023-06-26T03:23:07.184171Z", + "index": 8, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.090034, "end_time": 1687749786.987327, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6ggEskcWg2m6BxwHeI7ydD9us1", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.103779, "end_time": 1687749786.987023, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.014282, "end_time": 1687749787.019241, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.258564}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_7", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925055Z", "end_time": "2023-06-26T03:23:06.628877Z", + "index": 7, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.027456, + "end_time": 1687749786.410611, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gQx365BciPgdTHS3Z8SSo5Jy3", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.032612, + "end_time": 1687749786.410347, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.583318, "end_time": + 1687749786.589686, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.703822}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_6", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.924371Z", "end_time": "2023-06-26T03:23:06.719055Z", + "index": 6, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.24682, + "end_time": 1687749786.58161, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gAhrgKQqaLHoJ0uSsSwjqUAeJ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.251809, + "end_time": 1687749786.581202, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.635584, "end_time": + 1687749786.636154, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.794684}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_5", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.923074Z", "end_time": "2023-06-26T03:23:07.072135Z", + "index": 5, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.562227, + "end_time": 1687749786.977697, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gfJnwJYsJFa0B4vOO4NWH4AKQ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.576267, + "end_time": 1687749786.977416, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.02569, "end_time": + 1687749787.026418, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 1.149061}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_4", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.921433Z", "end_time": "2023-06-26T03:23:06.614149Z", + "index": 4, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749785.973467, + "end_time": 1687749786.400667, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gkBCHI23Fh87kS0mEAbGHx9pA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749785.979026, + "end_time": 1687749786.400304, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.55911, "end_time": + 1687749786.578687, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.692716}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_24", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 12}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.024507Z", + "end_time": "2023-06-26T03:23:07.438615Z", "index": 24, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.075877, + "end_time": 1687749787.397734, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hiNBgRAR9tZEF3Tf91w5ZguDJ", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.082731, + "end_time": 1687749787.397233, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.419773, "end_time": + 1687749787.420229, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.414108}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_23", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.795688Z", "end_time": "2023-06-26T03:23:07.892902Z", + "index": 23, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.81345, "end_time": 1687749787.847112, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gwpzAz1GBJO3KzooXGXPZmdUZ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.817235, "end_time": 1687749787.84674, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.871403, "end_time": 1687749787.872944, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.097214}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_22", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.763166Z", "end_time": "2023-06-26T03:23:07.750976Z", + "index": 22, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.783179, "end_time": 1687749787.707845, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzVFblcoeGceLtun2KQAVbQCp", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.786329, "end_time": 1687749787.70744, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.729795, "end_time": 1687749787.731401, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 0.98781}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_21", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.710217Z", + "end_time": "2023-06-26T03:23:07.173797Z", "index": 21, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.767366, + "end_time": 1687749787.086178, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gc4Hz8Rcbao6ddOYwPf2MJNEN", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.770944, + "end_time": 1687749787.08577, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.126275, "end_time": + 1687749787.131026, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.46358}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_20", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.668734Z", + "end_time": "2023-06-26T03:23:07.226807Z", "index": 20, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. 
You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.717613, + "end_time": 1687749787.052258, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gGlPv3f7BMy4pTdwkcNBo45GE", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.750599, + "end_time": 1687749787.051899, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.149692, "end_time": + 1687749787.15838, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.558073}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_2", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 1}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918964Z", "end_time": "2023-06-26T03:23:07.077312Z", + "index": 2, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question.", "start_time": 1687749786.165309, "end_time": 1687749787.009966, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g66RvUMNAwHOWEa9HaLvWlvcl", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question."}}], "usage": {"completion_tokens": 20, "prompt_tokens": 660, + "total_tokens": 680}}, "start_time": 1687749786.169575, "end_time": 1687749787.009569, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer + cannot be evaluated for relevance as it does not provide any information related + to the context or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.045358, "end_time": 1687749787.049266, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 680, + "duration": 1.158348}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_19", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.667555Z", "end_time": "2023-06-26T03:23:07.177107Z", + "index": 19, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.738953, + "end_time": 1687749787.088244, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g1JmBKhhOfuxjJuPHEC9NV6Va", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.759801, + "end_time": 1687749787.087893, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.145433, "end_time": + 1687749787.147615, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.509552}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_18", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.666035Z", "end_time": "2023-06-26T03:23:07.092812Z", + "index": 18, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.730346, + "end_time": 1687749786.9731, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gLeJuHSbEbXyG94ax9QM9Wah7", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.752874, + "end_time": 1687749786.972756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.008476, "end_time": + 1687749787.018592, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.426777}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_17", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.662427Z", + "end_time": "2023-06-26T03:23:07.133144Z", "index": 17, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.735989, + "end_time": 1687749787.016051, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxaAvoFFwCNBjQXFg5eFxaJjT", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.757651, + "end_time": 1687749787.015671, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.048912, "end_time": + 1687749787.050452, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.470717}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_16", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.655694Z", + "end_time": "2023-06-26T03:23:07.122365Z", "index": 16, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.70908, + "end_time": 1687749787.031387, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzTY9fkgqmmnnH5buLGLyEqgk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.744715, + "end_time": 1687749787.03083, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.069732, "end_time": + 1687749787.073404, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.466671}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_15", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928891Z", "end_time": "2023-06-26T03:23:06.773681Z", + "index": 15, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.233921, + "end_time": 1687749786.611735, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gUVd9GNeRfLuj68DVi8haNkdC", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.237554, + "end_time": 1687749786.611181, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.739266, "end_time": + 1687749786.741858, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.84479}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_14", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928427Z", "end_time": "2023-06-26T03:23:06.616904Z", + "index": 14, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.069216, + "end_time": 1687749786.432629, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g01pwPNajVfkyrMuJtBWiFy4m", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.073157, + "end_time": 1687749786.432281, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.565565, "end_time": + 1687749786.579587, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.688477}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_13", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.928071Z", + "end_time": "2023-06-26T03:23:06.633826Z", "index": 13, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.139509, + "end_time": 1687749786.532064, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6goLHWskNPhCcmi2Hgir2KXeSk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.145399, + "end_time": 1687749786.531745, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.598533, "end_time": + 1687749786.599522, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.705755}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_12", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.927162Z", + "end_time": "2023-06-26T03:23:44.779703Z", "index": 12, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.256038, + "end_time": 1687749824.734329, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": null, + "start_time": 1687749786.260484, "end_time": 1687749816.331631, "error": {"message": + "Request timed out: HTTPSConnectionPool(host=''gpt-test-eus.openai.azure.com'', + port=443): Read timed out. (read timeout=30)", "type": "Timeout"}, "children": + null, "node_name": null}, {"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. 
You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX7IrXJ70uqylcKBlhD0QOKjkcIk", "object": "chat.completion", "created": + 1687749824, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749824.367835, + "end_time": 1687749824.733865, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749824.760058, "end_time": + 1687749824.760631, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 38.852541}, "result": {"gpt_relevance": + 1.0}, "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_11", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.92671Z", "end_time": "2023-06-26T03:23:07.021941Z", + "index": 11, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749785.950494, "end_time": 1687749786.964181, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gaYSKfcn3vP5vmKnoZSD1kt5T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749785.965934, "end_time": 1687749786.963842, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.995692, "end_time": 1687749786.996377, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.095231}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_10", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926425Z", "end_time": "2023-06-26T03:23:07.028667Z", + "index": 10, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.081766, "end_time": 1687749786.962387, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g9wmlJ75NqtqQzYbdFHhVW80Y", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.091624, "end_time": 1687749786.961904, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.994394, "end_time": 1687749786.996118, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.102242}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_1", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 0}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918425Z", "end_time": "2023-06-26T03:23:07.00271Z", + "index": 1, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + as it does not provide any information related to the context or question.", + "start_time": 1687749786.05704, "end_time": 1687749786.943219, "error": null, + "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gsQBSiT1GGMYJ2iZT3cKraIjX", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + as it does not provide any information related to the context or question."}}], + "usage": {"completion_tokens": 18, "prompt_tokens": 660, "total_tokens": 678}}, + "start_time": 1687749786.063365, "end_time": 1687749786.942812, "error": null, + "children": null, "node_name": null}], "node_name": "relevance_score"}, {"name": + "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer cannot + be evaluated as it does not provide any information related to the context + or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.974267, "end_time": 1687749786.97596, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 678, + "duration": 1.084285}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_0", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 0}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918011Z", "end_time": "2023-06-26T03:23:06.652333Z", + "index": 0, 
"api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.144201, + "end_time": 1687749786.466651, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxIBjW27eCGjDfHbh1BVDDVZS", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.152072, + "end_time": 1687749786.466318, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.595585, "end_time": + 1687749786.598282, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.734322}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '218093' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.498' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=49&startIndex=25 + response: + body: + string: '[{"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_39", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 19}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, 
"parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.600618Z", + "end_time": "2023-06-26T03:23:07.966686Z", "index": 39, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.636971, + "end_time": 1687749787.917402, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h81khhINTMWNNNt7Y4MOcQMLx", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.641939, + "end_time": 1687749787.917081, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.941406, "end_time": + 1687749787.948302, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.366068}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_38", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 19}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.517609Z", + "end_time": "2023-06-26T03:23:07.906946Z", "index": 38, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. 
+ Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.53948, + "end_time": 1687749787.863873, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hJnVhqXXqIo5rWDIXDk5BIFaS", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.543053, + "end_time": 1687749787.863507, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.888554, "end_time": + 1687749787.888879, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.389337}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_37", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 18}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.513976Z", + "end_time": "2023-06-26T03:23:07.801068Z", "index": 37, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.532642, + "end_time": 1687749787.762109, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hKeWZelQdlUziHbxEoUkTpAAO", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.536601, + "end_time": 1687749787.761755, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.782522, "end_time": + 1687749787.782904, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.287092}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_36", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 18}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.464303Z", + "end_time": "2023-06-26T03:23:07.873152Z", "index": 36, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.484236, + "end_time": 1687749787.829911, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h2NB1lAd7qrefUgwlkMeiXOuo", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.488007, + "end_time": 1687749787.829589, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.853181, "end_time": + 1687749787.853628, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.408849}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_35", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 17}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.277668Z", + "end_time": "2023-06-26T03:23:07.688656Z", "index": 35, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.308445, + "end_time": 1687749787.608156, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h4BPBDV6zdn4Dmd6glSfB0cUz", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.315291, + "end_time": 1687749787.607512, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.646492, "end_time": + 1687749787.650441, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.410988}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_34", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 17}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.244627Z", + "end_time": "2023-06-26T03:23:07.683952Z", "index": 34, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.294603, + "end_time": 1687749787.629657, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hcVpIlwCKKc63fGxAUX98EyBn", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.300537, + "end_time": 1687749787.629341, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.657245, "end_time": + 1687749787.657872, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.439325}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_33", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 16}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.238665Z", "end_time": "2023-06-26T03:23:07.677226Z", + "index": 33, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.277229, + "end_time": 1687749787.620707, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hfMXKFhhvqms8TfH1HAyb6E2E", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.287339, + "end_time": 1687749787.620161, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.644707, "end_time": + 1687749787.649858, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.438561}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_32", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 16}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.235871Z", "end_time": "2023-06-26T03:23:07.681755Z", + "index": 32, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.270281, + "end_time": 1687749787.604052, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hM887dIsIoqd3rJ1wG7XLPLrl", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.279947, + "end_time": 1687749787.603768, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.635278, "end_time": + 1687749787.637439, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.445884}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_31", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "POI", "context": "Url", "variant_id": "variant_0", "line_number": + 15}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.222741Z", "end_time": "2023-06-26T03:23:07.690508Z", + "index": 31, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "POI", "max_tokens": 256, "deployment_name": "gpt-35-turbo", + "temperature": 0.0}, "output": "1", "start_time": 1687749787.246031, "end_time": + 1687749787.593134, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + POI\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hbkVupGlHFwhYXAg4qAKgZnVu", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.262001, + "end_time": 1687749787.592816, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.651766, "end_time": + 1687749787.654521, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.467767}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_30", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "POI", "context": "Url", "variant_id": "variant_1", "line_number": + 15}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.176166Z", "end_time": "2023-06-26T03:23:08.130807Z", + "index": 30, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "POI", "max_tokens": 256, "deployment_name": "gpt-35-turbo", + "temperature": 0.0}, "output": "Answer cannot be evaluated for relevance as + it does not provide any information related to the context or question.", + "start_time": 1687749787.24707, "end_time": 1687749788.085521, "error": null, + "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + POI\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hUBRhYUA3onfYro8SNmdimMgR", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question."}}], "usage": {"completion_tokens": 20, "prompt_tokens": 661, + "total_tokens": 681}}, "start_time": 1687749787.256342, "end_time": 1687749788.085071, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer + cannot be evaluated for relevance as it does not provide any information related + to the context or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749788.108912, "end_time": 1687749788.109308, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 0.954641}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_29", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Text content", "variant_id": "variant_0", + "line_number": 14}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.130352Z", + "end_time": "2023-06-26T03:23:07.57444Z", "index": 29, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. 
+ Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.15786, + "end_time": 1687749787.528061, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hyZJ8WwNLU3kWmkBGHExHM0yF", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.180753, + "end_time": 1687749787.527725, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.549187, "end_time": + 1687749787.549579, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.444088}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_28", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Text content", "variant_id": "variant_1", + "line_number": 14}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.128734Z", + "end_time": "2023-06-26T03:23:07.685942Z", "index": 28, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.2436, + "end_time": 1687749787.598607, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hFnpEbQV9Apbsg2yHx3WA20eg", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.250454, + "end_time": 1687749787.598243, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.648029, "end_time": + 1687749787.652107, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.557208}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_27", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 13}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.124795Z", "end_time": "2023-06-26T03:23:08.146481Z", + "index": 27, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749787.221249, "end_time": 1687749788.103616, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hjWeJWvatnOyWwo6BmtVyEnUa", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749787.233218, "end_time": 1687749788.102928, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749788.124877, "end_time": 1687749788.125462, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.021686}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_26", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 13}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.118155Z", "end_time": "2023-06-26T03:23:07.943037Z", + "index": 26, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should be evaluated based on the given context and question. Please provide + a valid answer.", "start_time": 1687749787.153758, "end_time": 1687749787.882669, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hkby6fk5IIQ3OxWBOaEfX8e0a", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should be evaluated based on the given context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 22, "prompt_tokens": 660, + "total_tokens": 682}}, "start_time": 1687749787.164658, "end_time": 1687749787.882348, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should be evaluated based on the given context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.912621, "end_time": 1687749787.913078, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 682, + "duration": 0.824882}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_25", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 12}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.068383Z", + "end_time": "2023-06-26T03:23:07.497208Z", "index": 25, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.160348, + "end_time": 1687749787.450765, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hMzxzRtNJ7j6vyld45MK7HKdP", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.169289, + "end_time": 1687749787.450473, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.473604, "end_time": + 1687749787.474146, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.428825}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '128035' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.450' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=74&startIndex=50 + response: + body: + string: '[]' + headers: + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '0.403' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=24&startIndex=0 + response: + body: + string: '[{"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_9", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926054Z", "end_time": "2023-06-26T03:23:07.491344Z", + "index": 9, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.563752, "end_time": 1687749787.442194, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gycy6JsyndYj9QfjjLiAEGf8T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.569262, "end_time": 1687749787.441872, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.467644, "end_time": 1687749787.468533, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.56529}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_8", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925607Z", "end_time": "2023-06-26T03:23:07.184171Z", + "index": 8, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.090034, "end_time": 1687749786.987327, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6ggEskcWg2m6BxwHeI7ydD9us1", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.103779, "end_time": 1687749786.987023, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.014282, "end_time": 1687749787.019241, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.258564}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_7", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925055Z", "end_time": "2023-06-26T03:23:06.628877Z", + "index": 7, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.027456, + "end_time": 1687749786.410611, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gQx365BciPgdTHS3Z8SSo5Jy3", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.032612, + "end_time": 1687749786.410347, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.583318, "end_time": + 1687749786.589686, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.703822}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_6", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.924371Z", "end_time": "2023-06-26T03:23:06.719055Z", + "index": 6, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. 
You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.24682, + "end_time": 1687749786.58161, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gAhrgKQqaLHoJ0uSsSwjqUAeJ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.251809, + "end_time": 1687749786.581202, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.635584, "end_time": + 1687749786.636154, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.794684}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_5", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.923074Z", "end_time": "2023-06-26T03:23:07.072135Z", + "index": 5, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.562227, + "end_time": 1687749786.977697, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gfJnwJYsJFa0B4vOO4NWH4AKQ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.576267, + "end_time": 1687749786.977416, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.02569, "end_time": + 1687749787.026418, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 1.149061}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_4", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.921433Z", "end_time": "2023-06-26T03:23:06.614149Z", + "index": 4, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749785.973467, + "end_time": 1687749786.400667, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gkBCHI23Fh87kS0mEAbGHx9pA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749785.979026, + "end_time": 1687749786.400304, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.55911, "end_time": + 1687749786.578687, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.692716}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_3", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 1}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.920659Z", "end_time": "2023-06-26T03:23:06.631Z", + "index": 3, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.088949, + "end_time": 1687749786.513045, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gB0Uro7MNo7VLsobEl6C6wuNA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.106145, + "end_time": 1687749786.512756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.571879, "end_time": + 1687749786.579389, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.710341}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_24", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 12}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.024507Z", + "end_time": "2023-06-26T03:23:07.438615Z", "index": 24, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.075877, + "end_time": 1687749787.397734, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hiNBgRAR9tZEF3Tf91w5ZguDJ", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.082731, + "end_time": 1687749787.397233, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.419773, "end_time": + 1687749787.420229, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.414108}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_23", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.795688Z", "end_time": "2023-06-26T03:23:07.892902Z", + "index": 23, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.81345, "end_time": 1687749787.847112, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gwpzAz1GBJO3KzooXGXPZmdUZ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.817235, "end_time": 1687749787.84674, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.871403, "end_time": 1687749787.872944, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.097214}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_22", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.763166Z", "end_time": "2023-06-26T03:23:07.750976Z", + "index": 22, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.783179, "end_time": 1687749787.707845, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzVFblcoeGceLtun2KQAVbQCp", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.786329, "end_time": 1687749787.70744, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.729795, "end_time": 1687749787.731401, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 0.98781}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_21", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.710217Z", + "end_time": "2023-06-26T03:23:07.173797Z", "index": 21, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.767366, + "end_time": 1687749787.086178, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gc4Hz8Rcbao6ddOYwPf2MJNEN", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.770944, + "end_time": 1687749787.08577, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.126275, "end_time": + 1687749787.131026, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.46358}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_20", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.668734Z", + "end_time": "2023-06-26T03:23:07.226807Z", "index": 20, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. 
You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.717613, + "end_time": 1687749787.052258, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gGlPv3f7BMy4pTdwkcNBo45GE", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.750599, + "end_time": 1687749787.051899, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.149692, "end_time": + 1687749787.15838, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.558073}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_2", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 1}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918964Z", "end_time": "2023-06-26T03:23:07.077312Z", + "index": 2, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question.", "start_time": 1687749786.165309, "end_time": 1687749787.009966, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g66RvUMNAwHOWEa9HaLvWlvcl", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question."}}], "usage": {"completion_tokens": 20, "prompt_tokens": 660, + "total_tokens": 680}}, "start_time": 1687749786.169575, "end_time": 1687749787.009569, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer + cannot be evaluated for relevance as it does not provide any information related + to the context or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.045358, "end_time": 1687749787.049266, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 680, + "duration": 1.158348}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_19", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.667555Z", "end_time": "2023-06-26T03:23:07.177107Z", + "index": 19, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.738953, + "end_time": 1687749787.088244, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g1JmBKhhOfuxjJuPHEC9NV6Va", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.759801, + "end_time": 1687749787.087893, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.145433, "end_time": + 1687749787.147615, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.509552}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_18", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.666035Z", "end_time": "2023-06-26T03:23:07.092812Z", + "index": 18, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.730346, + "end_time": 1687749786.9731, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gLeJuHSbEbXyG94ax9QM9Wah7", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.752874, + "end_time": 1687749786.972756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.008476, "end_time": + 1687749787.018592, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.426777}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_17", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.662427Z", + "end_time": "2023-06-26T03:23:07.133144Z", "index": 17, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.735989, + "end_time": 1687749787.016051, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxaAvoFFwCNBjQXFg5eFxaJjT", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.757651, + "end_time": 1687749787.015671, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.048912, "end_time": + 1687749787.050452, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.470717}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_16", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.655694Z", + "end_time": "2023-06-26T03:23:07.122365Z", "index": 16, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.70908, + "end_time": 1687749787.031387, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzTY9fkgqmmnnH5buLGLyEqgk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.744715, + "end_time": 1687749787.03083, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.069732, "end_time": + 1687749787.073404, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.466671}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_15", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928891Z", "end_time": "2023-06-26T03:23:06.773681Z", + "index": 15, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.233921, + "end_time": 1687749786.611735, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gUVd9GNeRfLuj68DVi8haNkdC", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.237554, + "end_time": 1687749786.611181, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.739266, "end_time": + 1687749786.741858, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.84479}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_14", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928427Z", "end_time": "2023-06-26T03:23:06.616904Z", + "index": 14, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.069216, + "end_time": 1687749786.432629, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g01pwPNajVfkyrMuJtBWiFy4m", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.073157, + "end_time": 1687749786.432281, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.565565, "end_time": + 1687749786.579587, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.688477}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_13", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.928071Z", + "end_time": "2023-06-26T03:23:06.633826Z", "index": 13, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.139509, + "end_time": 1687749786.532064, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6goLHWskNPhCcmi2Hgir2KXeSk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.145399, + "end_time": 1687749786.531745, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.598533, "end_time": + 1687749786.599522, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.705755}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_12", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.927162Z", + "end_time": "2023-06-26T03:23:44.779703Z", "index": 12, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.256038, + "end_time": 1687749824.734329, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": null, + "start_time": 1687749786.260484, "end_time": 1687749816.331631, "error": {"message": + "Request timed out: HTTPSConnectionPool(host=''gpt-test-eus.openai.azure.com'', + port=443): Read timed out. (read timeout=30)", "type": "Timeout"}, "children": + null, "node_name": null}, {"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. 
You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX7IrXJ70uqylcKBlhD0QOKjkcIk", "object": "chat.completion", "created": + 1687749824, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749824.367835, + "end_time": 1687749824.733865, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749824.760058, "end_time": + 1687749824.760631, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 38.852541}, "result": {"gpt_relevance": + 1.0}, "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_11", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.92671Z", "end_time": "2023-06-26T03:23:07.021941Z", + "index": 11, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749785.950494, "end_time": 1687749786.964181, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gaYSKfcn3vP5vmKnoZSD1kt5T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749785.965934, "end_time": 1687749786.963842, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.995692, "end_time": 1687749786.996377, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.095231}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_10", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926425Z", "end_time": "2023-06-26T03:23:07.028667Z", + "index": 10, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.081766, "end_time": 1687749786.962387, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g9wmlJ75NqtqQzYbdFHhVW80Y", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.091624, "end_time": 1687749786.961904, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.994394, "end_time": 1687749786.996118, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.102242}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_1", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 0}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918425Z", "end_time": "2023-06-26T03:23:07.00271Z", + "index": 1, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + as it does not provide any information related to the context or question.", + "start_time": 1687749786.05704, "end_time": 1687749786.943219, "error": null, + "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gsQBSiT1GGMYJ2iZT3cKraIjX", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + as it does not provide any information related to the context or question."}}], + "usage": {"completion_tokens": 18, "prompt_tokens": 660, "total_tokens": 678}}, + "start_time": 1687749786.063365, "end_time": 1687749786.942812, "error": null, + "children": null, "node_name": null}], "node_name": "relevance_score"}, {"name": + "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer cannot + be evaluated as it does not provide any information related to the context + or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.974267, "end_time": 1687749786.97596, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 678, + "duration": 1.084285}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_0", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 0}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918011Z", "end_time": "2023-06-26T03:23:06.652333Z", + "index": 0, 
"api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.144201, + "end_time": 1687749786.466651, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxIBjW27eCGjDfHbh1BVDDVZS", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.152072, + "end_time": 1687749786.466318, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.595585, "end_time": + 1687749786.598282, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.734322}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '218093' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.508' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=49&startIndex=25 + response: + body: + string: '[{"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_39", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 19}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, 
"parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.600618Z", + "end_time": "2023-06-26T03:23:07.966686Z", "index": 39, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.636971, + "end_time": 1687749787.917402, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h81khhINTMWNNNt7Y4MOcQMLx", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.641939, + "end_time": 1687749787.917081, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.941406, "end_time": + 1687749787.948302, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.366068}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_38", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 19}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.517609Z", + "end_time": "2023-06-26T03:23:07.906946Z", "index": 38, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. 
+ Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.53948, + "end_time": 1687749787.863873, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hJnVhqXXqIo5rWDIXDk5BIFaS", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.543053, + "end_time": 1687749787.863507, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.888554, "end_time": + 1687749787.888879, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.389337}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_37", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 18}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.513976Z", + "end_time": "2023-06-26T03:23:07.801068Z", "index": 37, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.532642, + "end_time": 1687749787.762109, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hKeWZelQdlUziHbxEoUkTpAAO", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.536601, + "end_time": 1687749787.761755, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.782522, "end_time": + 1687749787.782904, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.287092}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_36", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 18}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.464303Z", + "end_time": "2023-06-26T03:23:07.873152Z", "index": 36, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.484236, + "end_time": 1687749787.829911, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h2NB1lAd7qrefUgwlkMeiXOuo", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.488007, + "end_time": 1687749787.829589, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.853181, "end_time": + 1687749787.853628, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.408849}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_35", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 17}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.277668Z", + "end_time": "2023-06-26T03:23:07.688656Z", "index": 35, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.308445, + "end_time": 1687749787.608156, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h4BPBDV6zdn4Dmd6glSfB0cUz", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.315291, + "end_time": 1687749787.607512, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.646492, "end_time": + 1687749787.650441, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.410988}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_34", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 17}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.244627Z", + "end_time": "2023-06-26T03:23:07.683952Z", "index": 34, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.294603, + "end_time": 1687749787.629657, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hcVpIlwCKKc63fGxAUX98EyBn", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.300537, + "end_time": 1687749787.629341, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.657245, "end_time": + 1687749787.657872, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.439325}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_33", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 16}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.238665Z", "end_time": "2023-06-26T03:23:07.677226Z", + "index": 33, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.277229, + "end_time": 1687749787.620707, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hfMXKFhhvqms8TfH1HAyb6E2E", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.287339, + "end_time": 1687749787.620161, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.644707, "end_time": + 1687749787.649858, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.438561}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_32", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 16}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.235871Z", "end_time": "2023-06-26T03:23:07.681755Z", + "index": 32, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.270281, + "end_time": 1687749787.604052, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hM887dIsIoqd3rJ1wG7XLPLrl", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.279947, + "end_time": 1687749787.603768, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.635278, "end_time": + 1687749787.637439, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.445884}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_31", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "POI", "context": "Url", "variant_id": "variant_0", "line_number": + 15}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.222741Z", "end_time": "2023-06-26T03:23:07.690508Z", + "index": 31, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "POI", "max_tokens": 256, "deployment_name": "gpt-35-turbo", + "temperature": 0.0}, "output": "1", "start_time": 1687749787.246031, "end_time": + 1687749787.593134, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + POI\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hbkVupGlHFwhYXAg4qAKgZnVu", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.262001, + "end_time": 1687749787.592816, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.651766, "end_time": + 1687749787.654521, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.467767}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_30", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "POI", "context": "Url", "variant_id": "variant_1", "line_number": + 15}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.176166Z", "end_time": "2023-06-26T03:23:08.130807Z", + "index": 30, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "POI", "max_tokens": 256, "deployment_name": "gpt-35-turbo", + "temperature": 0.0}, "output": "Answer cannot be evaluated for relevance as + it does not provide any information related to the context or question.", + "start_time": 1687749787.24707, "end_time": 1687749788.085521, "error": null, + "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + POI\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hUBRhYUA3onfYro8SNmdimMgR", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question."}}], "usage": {"completion_tokens": 20, "prompt_tokens": 661, + "total_tokens": 681}}, "start_time": 1687749787.256342, "end_time": 1687749788.085071, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer + cannot be evaluated for relevance as it does not provide any information related + to the context or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749788.108912, "end_time": 1687749788.109308, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 0.954641}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_29", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Text content", "variant_id": "variant_0", + "line_number": 14}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.130352Z", + "end_time": "2023-06-26T03:23:07.57444Z", "index": 29, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. 
+ Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.15786, + "end_time": 1687749787.528061, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hyZJ8WwNLU3kWmkBGHExHM0yF", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.180753, + "end_time": 1687749787.527725, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.549187, "end_time": + 1687749787.549579, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.444088}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_28", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Text content", "variant_id": "variant_1", + "line_number": 14}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.128734Z", + "end_time": "2023-06-26T03:23:07.685942Z", "index": 28, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.2436, + "end_time": 1687749787.598607, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hFnpEbQV9Apbsg2yHx3WA20eg", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.250454, + "end_time": 1687749787.598243, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.648029, "end_time": + 1687749787.652107, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.557208}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_27", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 13}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.124795Z", "end_time": "2023-06-26T03:23:08.146481Z", + "index": 27, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749787.221249, "end_time": 1687749788.103616, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hjWeJWvatnOyWwo6BmtVyEnUa", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749787.233218, "end_time": 1687749788.102928, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749788.124877, "end_time": 1687749788.125462, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.021686}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_26", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 13}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.118155Z", "end_time": "2023-06-26T03:23:07.943037Z", + "index": 26, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should be evaluated based on the given context and question. Please provide + a valid answer.", "start_time": 1687749787.153758, "end_time": 1687749787.882669, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hkby6fk5IIQ3OxWBOaEfX8e0a", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should be evaluated based on the given context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 22, "prompt_tokens": 660, + "total_tokens": 682}}, "start_time": 1687749787.164658, "end_time": 1687749787.882348, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should be evaluated based on the given context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.912621, "end_time": 1687749787.913078, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 682, + "duration": 0.824882}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_25", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 12}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.068383Z", + "end_time": "2023-06-26T03:23:07.497208Z", "index": 25, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.160348, + "end_time": 1687749787.450765, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hMzxzRtNJ7j6vyld45MK7HKdP", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.169289, + "end_time": 1687749787.450473, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.473604, "end_time": + 1687749787.474146, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.428825}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '128035' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.479' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=74&startIndex=50 + response: + body: + string: '[]' + headers: + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '0.481' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=24&startIndex=0 + response: + body: + string: '[{"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_3", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 1}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.920659Z", "end_time": "2023-06-26T03:23:06.631Z", + "index": 3, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.088949, + "end_time": 1687749786.513045, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gB0Uro7MNo7VLsobEl6C6wuNA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.106145, + "end_time": 1687749786.512756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.571879, "end_time": + 1687749786.579389, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.710341}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_9", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926054Z", "end_time": "2023-06-26T03:23:07.491344Z", + "index": 9, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.563752, "end_time": 1687749787.442194, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gycy6JsyndYj9QfjjLiAEGf8T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.569262, "end_time": 1687749787.441872, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.467644, "end_time": 1687749787.468533, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.56529}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_8", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 4}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925607Z", "end_time": "2023-06-26T03:23:07.184171Z", + "index": 8, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.090034, "end_time": 1687749786.987327, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6ggEskcWg2m6BxwHeI7ydD9us1", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.103779, "end_time": 1687749786.987023, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.014282, "end_time": 1687749787.019241, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.258564}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_7", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.925055Z", "end_time": "2023-06-26T03:23:06.628877Z", + "index": 7, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.027456, + "end_time": 1687749786.410611, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gQx365BciPgdTHS3Z8SSo5Jy3", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.032612, + "end_time": 1687749786.410347, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.583318, "end_time": + 1687749786.589686, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.703822}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_6", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 3}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.924371Z", "end_time": "2023-06-26T03:23:06.719055Z", + "index": 6, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. 
You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.24682, + "end_time": 1687749786.58161, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gAhrgKQqaLHoJ0uSsSwjqUAeJ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.251809, + "end_time": 1687749786.581202, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.635584, "end_time": + 1687749786.636154, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.794684}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_5", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.923074Z", "end_time": "2023-06-26T03:23:07.072135Z", + "index": 5, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.562227, + "end_time": 1687749786.977697, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gfJnwJYsJFa0B4vOO4NWH4AKQ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.576267, + "end_time": 1687749786.977416, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.02569, "end_time": + 1687749787.026418, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 1.149061}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_4", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 2}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.921433Z", "end_time": "2023-06-26T03:23:06.614149Z", + "index": 4, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749785.973467, + "end_time": 1687749786.400667, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gkBCHI23Fh87kS0mEAbGHx9pA", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749785.979026, + "end_time": 1687749786.400304, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.55911, "end_time": + 1687749786.578687, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.692716}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_24", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 12}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.024507Z", + "end_time": "2023-06-26T03:23:07.438615Z", "index": 24, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.075877, + "end_time": 1687749787.397734, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hiNBgRAR9tZEF3Tf91w5ZguDJ", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.082731, + "end_time": 1687749787.397233, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.419773, "end_time": + 1687749787.420229, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.414108}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_23", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.795688Z", "end_time": "2023-06-26T03:23:07.892902Z", + "index": 23, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.81345, "end_time": 1687749787.847112, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gwpzAz1GBJO3KzooXGXPZmdUZ", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.817235, "end_time": 1687749787.84674, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.871403, "end_time": 1687749787.872944, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.097214}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_22", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 11}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.763166Z", "end_time": "2023-06-26T03:23:07.750976Z", + "index": 22, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.783179, "end_time": 1687749787.707845, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzVFblcoeGceLtun2KQAVbQCp", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.786329, "end_time": 1687749787.70744, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.729795, "end_time": 1687749787.731401, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 0.98781}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_21", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.710217Z", + "end_time": "2023-06-26T03:23:07.173797Z", "index": 21, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.767366, + "end_time": 1687749787.086178, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gc4Hz8Rcbao6ddOYwPf2MJNEN", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.770944, + "end_time": 1687749787.08577, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.126275, "end_time": + 1687749787.131026, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.46358}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_20", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 10}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.668734Z", + "end_time": "2023-06-26T03:23:07.226807Z", "index": 20, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. 
You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.717613, + "end_time": 1687749787.052258, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gGlPv3f7BMy4pTdwkcNBo45GE", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.750599, + "end_time": 1687749787.051899, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.149692, "end_time": + 1687749787.15838, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.558073}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_2", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 1}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918964Z", "end_time": "2023-06-26T03:23:07.077312Z", + "index": 2, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question.", "start_time": 1687749786.165309, "end_time": 1687749787.009966, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g66RvUMNAwHOWEa9HaLvWlvcl", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question."}}], "usage": {"completion_tokens": 20, "prompt_tokens": 660, + "total_tokens": 680}}, "start_time": 1687749786.169575, "end_time": 1687749787.009569, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer + cannot be evaluated for relevance as it does not provide any information related + to the context or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.045358, "end_time": 1687749787.049266, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 680, + "duration": 1.158348}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_19", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.667555Z", "end_time": "2023-06-26T03:23:07.177107Z", + "index": 19, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. 
Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.738953, + "end_time": 1687749787.088244, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g1JmBKhhOfuxjJuPHEC9NV6Va", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.759801, + "end_time": 1687749787.087893, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.145433, "end_time": + 1687749787.147615, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.509552}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_18", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 9}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:06.666035Z", "end_time": "2023-06-26T03:23:07.092812Z", + "index": 18, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.730346, + "end_time": 1687749786.9731, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gLeJuHSbEbXyG94ax9QM9Wah7", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.752874, + "end_time": 1687749786.972756, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.008476, "end_time": + 1687749787.018592, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.426777}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_17", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.662427Z", + "end_time": "2023-06-26T03:23:07.133144Z", "index": 17, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.735989, + "end_time": 1687749787.016051, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxaAvoFFwCNBjQXFg5eFxaJjT", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.757651, + "end_time": 1687749787.015671, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.048912, "end_time": + 1687749787.050452, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.470717}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_16", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 8}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:06.655694Z", + "end_time": "2023-06-26T03:23:07.122365Z", "index": 16, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.70908, + "end_time": 1687749787.031387, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gzTY9fkgqmmnnH5buLGLyEqgk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.744715, + "end_time": 1687749787.03083, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.069732, "end_time": + 1687749787.073404, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.466671}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_15", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928891Z", "end_time": "2023-06-26T03:23:06.773681Z", + "index": 15, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.233921, + "end_time": 1687749786.611735, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gUVd9GNeRfLuj68DVi8haNkdC", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.237554, + "end_time": 1687749786.611181, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.739266, "end_time": + 1687749786.741858, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.84479}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_14", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 7}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.928427Z", "end_time": "2023-06-26T03:23:06.616904Z", + "index": 14, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.069216, + "end_time": 1687749786.432629, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g01pwPNajVfkyrMuJtBWiFy4m", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.073157, + "end_time": 1687749786.432281, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.565565, "end_time": + 1687749786.579587, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.688477}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_13", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.928071Z", + "end_time": "2023-06-26T03:23:06.633826Z", "index": 13, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.139509, + "end_time": 1687749786.532064, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6goLHWskNPhCcmi2Hgir2KXeSk", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749786.145399, + "end_time": 1687749786.531745, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.598533, "end_time": + 1687749786.599522, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.705755}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_12", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 6}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:05.927162Z", + "end_time": "2023-06-26T03:23:44.779703Z", "index": 12, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.256038, + "end_time": 1687749824.734329, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": null, + "start_time": 1687749786.260484, "end_time": 1687749816.331631, "error": {"message": + "Request timed out: HTTPSConnectionPool(host=''gpt-test-eus.openai.azure.com'', + port=443): Read timed out. (read timeout=30)", "type": "Timeout"}, "children": + null, "node_name": null}, {"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. 
You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX7IrXJ70uqylcKBlhD0QOKjkcIk", "object": "chat.completion", "created": + 1687749824, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749824.367835, + "end_time": 1687749824.733865, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749824.760058, "end_time": + 1687749824.760631, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 38.852541}, "result": {"gpt_relevance": + 1.0}, "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_11", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.92671Z", "end_time": "2023-06-26T03:23:07.021941Z", + "index": 11, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749785.950494, "end_time": 1687749786.964181, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gaYSKfcn3vP5vmKnoZSD1kt5T", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749785.965934, "end_time": 1687749786.963842, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.995692, "end_time": 1687749786.996377, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.095231}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_10", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 5}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.926425Z", "end_time": "2023-06-26T03:23:07.028667Z", + "index": 10, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749786.081766, "end_time": 1687749786.962387, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6g9wmlJ75NqtqQzYbdFHhVW80Y", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749786.091624, "end_time": 1687749786.961904, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.994394, "end_time": 1687749786.996118, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.102242}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_1", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_0", "line_number": + 0}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918425Z", "end_time": "2023-06-26T03:23:07.00271Z", + "index": 1, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Answer cannot be evaluated + as it does not provide any information related to the context or question.", + "start_time": 1687749786.05704, "end_time": 1687749786.943219, "error": null, + "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gsQBSiT1GGMYJ2iZT3cKraIjX", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + as it does not provide any information related to the context or question."}}], + "usage": {"completion_tokens": 18, "prompt_tokens": 660, "total_tokens": 678}}, + "start_time": 1687749786.063365, "end_time": 1687749786.942812, "error": null, + "children": null, "node_name": null}], "node_name": "relevance_score"}, {"name": + "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer cannot + be evaluated as it does not provide any information related to the context + or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749786.974267, "end_time": 1687749786.97596, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 678, + "duration": 1.084285}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_0", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Channel", "context": "Url", "variant_id": "variant_1", "line_number": + 0}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:05.918011Z", "end_time": "2023-06-26T03:23:06.652333Z", + "index": 0, 
"api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Channel", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749786.144201, + "end_time": 1687749786.466651, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Channel\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6gxIBjW27eCGjDfHbh1BVDDVZS", "object": "chat.completion", "created": + 1687749786, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 660, "total_tokens": 661}}, "start_time": 1687749786.152072, + "end_time": 1687749786.466318, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749786.595585, "end_time": + 1687749786.598282, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 661, "duration": 0.734322}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '218093' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.464' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=49&startIndex=25 + response: + body: + string: '[{"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_39", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 19}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, 
"parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.600618Z", + "end_time": "2023-06-26T03:23:07.966686Z", "index": 39, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.636971, + "end_time": 1687749787.917402, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h81khhINTMWNNNt7Y4MOcQMLx", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.641939, + "end_time": 1687749787.917081, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.941406, "end_time": + 1687749787.948302, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.366068}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_38", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 19}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.517609Z", + "end_time": "2023-06-26T03:23:07.906946Z", "index": 38, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. 
+ Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.53948, + "end_time": 1687749787.863873, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hJnVhqXXqIo5rWDIXDk5BIFaS", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.543053, + "end_time": 1687749787.863507, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.888554, "end_time": + 1687749787.888879, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.389337}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_37", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 18}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.513976Z", + "end_time": "2023-06-26T03:23:07.801068Z", "index": 37, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.532642, + "end_time": 1687749787.762109, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hKeWZelQdlUziHbxEoUkTpAAO", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.536601, + "end_time": 1687749787.761755, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.782522, "end_time": + 1687749787.782904, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.287092}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_36", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 18}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.464303Z", + "end_time": "2023-06-26T03:23:07.873152Z", "index": 36, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.484236, + "end_time": 1687749787.829911, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h2NB1lAd7qrefUgwlkMeiXOuo", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.488007, + "end_time": 1687749787.829589, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.853181, "end_time": + 1687749787.853628, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.408849}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_35", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 17}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.277668Z", + "end_time": "2023-06-26T03:23:07.688656Z", "index": 35, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.308445, + "end_time": 1687749787.608156, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6h4BPBDV6zdn4Dmd6glSfB0cUz", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.315291, + "end_time": 1687749787.607512, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.646492, "end_time": + 1687749787.650441, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.410988}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_34", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_1", + "line_number": 17}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.244627Z", + "end_time": "2023-06-26T03:23:07.683952Z", "index": 34, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.294603, + "end_time": 1687749787.629657, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hcVpIlwCKKc63fGxAUX98EyBn", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.300537, + "end_time": 1687749787.629341, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.657245, "end_time": + 1687749787.657872, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.439325}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_33", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_0", "line_number": + 16}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.238665Z", "end_time": "2023-06-26T03:23:07.677226Z", + "index": 33, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.277229, + "end_time": 1687749787.620707, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hfMXKFhhvqms8TfH1HAyb6E2E", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.287339, + "end_time": 1687749787.620161, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.644707, "end_time": + 1687749787.649858, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.438561}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_32", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "None", "context": "Text content", "variant_id": "variant_1", "line_number": + 16}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.235871Z", "end_time": "2023-06-26T03:23:07.681755Z", + "index": 32, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "None", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.270281, + "end_time": 1687749787.604052, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + None\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hM887dIsIoqd3rJ1wG7XLPLrl", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.279947, + "end_time": 1687749787.603768, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.635278, "end_time": + 1687749787.637439, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.445884}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_31", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "POI", "context": "Url", "variant_id": "variant_0", "line_number": + 15}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": null, "parent_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.222741Z", "end_time": "2023-06-26T03:23:07.690508Z", + "index": 31, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "POI", "max_tokens": 256, "deployment_name": "gpt-35-turbo", + "temperature": 0.0}, "output": "1", "start_time": 1687749787.246031, "end_time": + 1687749787.593134, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + POI\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hbkVupGlHFwhYXAg4qAKgZnVu", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.262001, + "end_time": 1687749787.592816, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.651766, "end_time": + 1687749787.654521, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.467767}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_30", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "POI", "context": "Url", "variant_id": "variant_1", "line_number": + 15}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.176166Z", "end_time": "2023-06-26T03:23:08.130807Z", + "index": 30, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "POI", "max_tokens": 256, "deployment_name": "gpt-35-turbo", + "temperature": 0.0}, "output": "Answer cannot be evaluated for relevance as + it does not provide any information related to the context or question.", + "start_time": 1687749787.24707, "end_time": 1687749788.085521, "error": null, + "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + POI\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hUBRhYUA3onfYro8SNmdimMgR", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Answer cannot be evaluated + for relevance as it does not provide any information related to the context + or question."}}], "usage": {"completion_tokens": 20, "prompt_tokens": 661, + "total_tokens": 681}}, "start_time": 1687749787.256342, "end_time": 1687749788.085071, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Answer + cannot be evaluated for relevance as it does not provide any information related + to the context or question."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749788.108912, "end_time": 1687749788.109308, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 0.954641}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_29", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Text content", "variant_id": "variant_0", + "line_number": 14}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.130352Z", + "end_time": "2023-06-26T03:23:07.57444Z", "index": 29, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. 
+ Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.15786, + "end_time": 1687749787.528061, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. 
Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hyZJ8WwNLU3kWmkBGHExHM0yF", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.180753, + "end_time": 1687749787.527725, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.549187, "end_time": + 1687749787.549579, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.444088}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_28", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Text content", "variant_id": "variant_1", + "line_number": 14}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.128734Z", + "end_time": "2023-06-26T03:23:07.685942Z", "index": 28, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.2436, + "end_time": 1687749787.598607, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hFnpEbQV9Apbsg2yHx3WA20eg", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.250454, + "end_time": 1687749787.598243, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.648029, "end_time": + 1687749787.652107, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.557208}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_27", + "status": "Completed", "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_0", "line_number": + 13}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.124795Z", "end_time": "2023-06-26T03:23:08.146481Z", + "index": 27, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. 
So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should provide information related to the context and question. Please provide + a valid answer.", "start_time": 1687749787.221249, "end_time": 1687749788.103616, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. 
Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hjWeJWvatnOyWwo6BmtVyEnUa", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should provide information related to the context and question. 
Please provide + a valid answer."}}], "usage": {"completion_tokens": 21, "prompt_tokens": 660, + "total_tokens": 681}}, "start_time": 1687749787.233218, "end_time": 1687749788.102928, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should provide information related to the context and question. + Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749788.124877, "end_time": 1687749788.125462, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 681, + "duration": 1.021686}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_26", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Profile", "context": "Url", "variant_id": "variant_1", "line_number": + 13}, "output": {"gpt_relevance": "NaN"}, "metrics": null, "request": null, + "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "source_run_id": null, "flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "start_time": "2023-06-26T03:23:07.118155Z", "end_time": "2023-06-26T03:23:07.943037Z", + "index": 26, "api_calls": [{"name": "relevance_score", "type": "Tool", "inputs": + {"prompt": "System:\nYou are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Url", "answer": "Profile", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "Invalid answer. The answer + should be evaluated based on the given context and question. Please provide + a valid answer.", "start_time": 1687749787.153758, "end_time": 1687749787.882669, + "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. 
So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Url\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Profile\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hkby6fk5IIQ3OxWBOaEfX8e0a", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "Invalid answer. The answer + should be evaluated based on the given context and question. Please provide + a valid answer."}}], "usage": {"completion_tokens": 22, "prompt_tokens": 660, + "total_tokens": 682}}, "start_time": 1687749787.164658, "end_time": 1687749787.882348, + "error": null, "children": null, "node_name": null}], "node_name": "relevance_score"}, + {"name": "concat_scores", "type": "Tool", "inputs": {"relevance_score": "Invalid + answer. The answer should be evaluated based on the given context and question. 
+ Please provide a valid answer."}, "output": {"gpt_relevance": "NaN", "gpt_relevance_pass_rate": + 0}, "start_time": 1687749787.912621, "end_time": 1687749787.913078, "error": + null, "children": [], "node_name": "concat_scores"}], "variant_id": "", "name": + "", "description": "", "tags": null, "system_metrics": {"total_tokens": 682, + "duration": 0.824882}, "result": {"gpt_relevance": "NaN"}, "upload_metrics": + false}, {"run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74_25", "status": "Completed", + "error": null, "inputs": {"question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "answer": "Academic", "context": "Text content", "variant_id": "variant_0", + "line_number": 12}, "output": {"gpt_relevance": 1.0}, "metrics": null, "request": + null, "parent_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "root_run_id": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "source_run_id": null, "flow_id": + "42f7ee16-2243-475b-af21-e8fabd4abd15", "start_time": "2023-06-26T03:23:07.068383Z", + "end_time": "2023-06-26T03:23:07.497208Z", "index": 25, "api_calls": [{"name": + "relevance_score", "type": "Tool", "inputs": {"prompt": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. 
+ Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "question": "https://www.youtube.com/watch?v=o5ZQyXaAv1g", + "context": "Text content", "answer": "Academic", "max_tokens": 256, "deployment_name": + "gpt-35-turbo", "temperature": 0.0}, "output": "1", "start_time": 1687749787.160348, + "end_time": 1687749787.450765, "error": null, "children": [{"name": "openai.api_resources.chat_completion.ChatCompletion.create", + "type": "LLM", "inputs": {"engine": "gpt-35-turbo", "messages": [{"role": + "system", "content": "You are an AI assistant. You will be given the definition + of an evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric."}, {"role": "user", "content": "Relevance measures how + well the answer addresses the main aspects of the question, based on the context. + Consider whether all and only the important aspects are contained in the answer + when evaluating relevance. Given the context and question, score the relevance + of the answer between one to five stars using the following rating scale:\nOne + star: the answer completely lacks relevance\nTwo stars: the answer mostly + lacks relevance\nThree stars: the answer is partially relevant\nFour stars: + the answer is mostly relevant\nFive stars: the answer has perfect relevance\n\nThis + rating value should always be an integer between 1 and 5. So the rating produced + should be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born + physicist and chemist who pioneered research on radioactivity and was the + first woman to win a Nobel Prize.\nquestion: What field did Marie Curie excel + in?\nanswer: Marie Curie was a renowned painter who focused mainly on impressionist + styles and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: Text content\nquestion: https://www.youtube.com/watch?v=o5ZQyXaAv1g\nanswer: + Academic\nstars:"}], "temperature": 0.0, "top_p": 1.0, "n": 1, "stream": false, + "stop": null, "max_tokens": 256, "presence_penalty": 0.0, "frequency_penalty": + 0.0, "logit_bias": {}, "user": "", "request_timeout": 30, "api_base": "https://gpt-test-eus.openai.azure.com/", + "api_type": "azure", "api_version": "2023-03-15-preview"}, "output": {"id": + "chatcmpl-7VX6hMzxzRtNJ7j6vyld45MK7HKdP", "object": "chat.completion", "created": + 1687749787, "model": "gpt-35-turbo", "choices": [{"index": 0, "finish_reason": + "stop", "message": {"role": "assistant", "content": "1"}}], "usage": {"completion_tokens": + 1, "prompt_tokens": 661, "total_tokens": 662}}, "start_time": 1687749787.169289, + "end_time": 1687749787.450473, "error": null, "children": null, "node_name": + null}], "node_name": "relevance_score"}, {"name": "concat_scores", "type": + "Tool", "inputs": {"relevance_score": "1"}, "output": {"gpt_relevance": 1.0, + "gpt_relevance_pass_rate": 0}, "start_time": 1687749787.473604, "end_time": + 1687749787.474146, "error": null, "children": [], "node_name": "concat_scores"}], + "variant_id": "", "name": "", "description": "", "tags": null, "system_metrics": + {"total_tokens": 662, "duration": 0.428825}, "result": {"gpt_relevance": 1.0}, + "upload_metrics": false}]' + headers: + connection: + - keep-alive + content-length: + - '128035' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.454' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: 
https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/childRuns?endIndex=74&startIndex=50 + response: + body: + string: '[]' + headers: + connection: + - keep-alive + content-length: + - '2' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + x-content-type-options: + - nosniff + x-request-time: + - '0.362' + status: + code: 200 + message: OK +- request: + body: '{"runId": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "selectRunMetadata": + true, "selectRunDefinition": true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1687749782, "rootRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "createdUtc": "2023-06-26T03:23:02.3017884+00:00", "createdBy": {"userObjectId": + "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "userPuId": "100320007E5EB49B", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Zhen + Ruan", "upn": null}, "userId": "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "token": + null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": + 78, "statusRevision": 2, "runUuid": "fab99b4a-07c2-4c77-8ee3-3d3a47102b4a", + "parentRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", "rootRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:41.1688602", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T08:49:39.7241411+00:00", "duration": + "00:00:41.1688602", "cancelationReason": null, "currentAttemptId": 1, "runId": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "parentRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "experimentId": "ad8dde3a-73f1-49bc-ae71-c7c9b2dc1b9f", "status": "Completed", + "startTimeUtc": "2023-06-26T03:23:05.7657269+00:00", "endTimeUtc": "2023-06-26T03:23:46.9345871+00:00", + "scheduleId": null, "displayName": "test_display_name_a05e7c55-9cf1-4909-b3e5-5df713c86ecc", + "name": null, "dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "description": "test_description_a05e7c55-9cf1-4909-b3e5-5df713c86ecc", "hidden": + false, "runType": "azureml.promptflow.EvaluationRun", "runTypeV2": {"orchestrator": + null, "traits": [], "attribution": null, "computeType": "MIR_v2"}, "properties": + {"azureml.promptflow.flow_id": "QnARelevanceEvaluation", "azureml.promptflow.flow_name": + "QnA Relevance Evaluation", "azureml.promptflow.flow_type": "Evaluation", + 
"azureml.promptflow.source_flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "azureml.promptflow.baseline_variant_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "azureml.promptflow.variant_ids": "", "azureml.promptflow.bulk_test_id": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "azureml.promptflow.flow_experiment_id": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20230614.v1", "azureml.promptflow.total_tokens": "26680"}, "parameters": + {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {"test_tag": "a05e7c55-9cf1-4909-b3e5-5df713c86ecc", "hod": "1"}, + "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": [], + "runDefinition": null, "jobSpecification": null, "primaryMetricName": null, + "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + null, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "relevance_score", "Tool": "compute_relevance_score", + "Comment": null, "Inputs": {"question": "${flow.question}", "context": "${flow.context}", + "answer": "${flow.answer}", "max_tokens": "256", "deployment_name": "gpt-35-turbo", + "temperature": "0.0"}, "Api": "chat", "Provider": "AzureOpenAI", "Connection": + "azure_open_ai_connection", "Module": "promptflow.tools.aoai", "Reduce": false}, + {"Name": "concat_scores", "Tool": "concat_results", "Comment": null, "Inputs": + {"relevance_score": "${relevance_score.output}"}, "Api": null, "Provider": + null, "Connection": null, "Module": null, "Reduce": false}, {"Name": "aggregate_variants_results", + "Tool": "aggregate_variants_results", "Comment": null, "Inputs": {"results": + "${concat_scores.output}", "line_number": "${flow.line_number}", "variant_id": + "${flow.variant_id}"}, "Api": null, "Provider": null, "Connection": null, + "Module": null, "Reduce": true}], "Tools": [{"Name": "compute_relevance_score", + "Type": "llm", "Inputs": {"context": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "question": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "answer": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}}, "Outputs": null, "Description": "This is a llm + tool", "connection_type": null, "Module": null, "class_name": null, "Source": + "compute_relevance_score.jinja2", "LkgCode": null, "Code": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. 
+ Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "concat_results", "Type": "python", "Inputs": {"relevance_score": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "concat_results.py", "LkgCode": null, "Code": "from promptflow + import tool\nimport numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: + str):\n\n load_list = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "Function": + "concat_results", "action_type": null, "provider_config": null, "function_config": + null, "is_builtin": false, "package": null, "package_version": null}, {"Name": + "aggregate_variants_results", "Type": "python", "Inputs": {"variant_id": {"Name": + null, "Type": ["object"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "model_list": null}, "line_number": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}, "results": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "aggregate_variants_results.py", "LkgCode": null, "Code": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = 
name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n", "Function": "aggregate_variants_results", "action_type": + null, "provider_config": null, "function_config": null, "is_builtin": false, + "package": null, "package_version": null}], "Codes": {"compute_relevance_score.jinja2": + "System:\nYou are an AI assistant. You will be given the definition of an + evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "concat_results.py": "from promptflow import tool\nimport + numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: str):\n\n load_list + = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "aggregate_variants_results.py": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n"}, "Inputs": {"question": {"Name": null, "Type": "string", + "Default": null, "Description": null, "is_chat_input": false}, "context": + {"Name": null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false}, "answer": {"Name": null, "Type": "string", "Default": null, "Description": + null, "is_chat_input": false}, "line_number": {"Name": null, "Type": "int", + "Default": null, "Description": null, "is_chat_input": false}, "variant_id": + {"Name": null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false}}, "Outputs": {"gpt_relevance": {"Name": null, "Type": "object", "Description": + null, "Reference": "${concat_scores.output.gpt_relevance}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '21284' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; 
includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.036' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_failed_run_logs.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_failed_run_logs.yaml new file mode 100644 index 00000000000..5ef0d012234 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_failed_run_logs.yaml @@ -0,0 +1,406 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.021' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + 
"lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.076' + status: + code: 200 + message: OK +- request: + body: '{"runId": "3dfd077a-f071-443e-9c4e-d41531710950", "selectRunMetadata": + true, "selectRunDefinition": true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1689673888, "rootRunId": "fd68a549-2027-4f0f-9f21-adc39cc86c94", + "createdUtc": "2023-07-18T09:51:28.1405441+00:00", "createdBy": {"userObjectId": + "c05e0746-e125-4cb3-9213-a8b535eacd79", "userPuId": "10032000324F7449", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Honglin + Du", "upn": null}, "userId": "c05e0746-e125-4cb3-9213-a8b535eacd79", "token": + null, "tokenExpiryTimeUtc": null, "error": {"error": {"code": "UserError", + "severity": null, "message": "Input ''question'' in line 0 is not provided + for flow ''Simple_mock_answer''.", "messageFormat": "", "messageParameters": + {}, "referenceCode": "Executor", "detailsUri": null, "target": null, "details": + [], "innerError": {"code": "ValidationError", "innerError": {"code": "InvalidFlowRequest", + "innerError": {"code": "InputNotFound", "innerError": null}}}, "debugInfo": + {"type": "InputNotFound", "message": null, "stackTrace": "Traceback (most + recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 243, in exec_request_raw\n return self._route_request_raw(raw_request, + raise_ex=raise_ex)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 316, in _route_request_raw\n return self._exec_batch_request(\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 393, in _exec_batch_request\n run_infos = self._exec_batch_request_inner(\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 654, in _exec_batch_request_inner\n batch_inputs = FlowRequestValidator.ensure_batch_inputs_type(batch_request.flow, + batch_request.batch_inputs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_request_validator.py\", + line 99, in ensure_batch_inputs_type\n return [cls.ensure_flow_inputs_type(flow, + inputs, idx) for idx, inputs in enumerate(batch_inputs)]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_request_validator.py\", + line 99, in \n return [cls.ensure_flow_inputs_type(flow, inputs, + idx) for idx, inputs in enumerate(batch_inputs)]\n File 
\"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_request_validator.py\", + line 192, in ensure_flow_inputs_type\n raise InputNotFound(\npromptflow.executor.flow_request_validator.InputNotFound: + Input ''question'' in line 0 is not provided for flow ''Simple_mock_answer''.\n", + "innerException": null, "data": null, "errorResponse": null}, "additionalInfo": + null}, "correlation": null, "environment": null, "location": null, "time": + "2023-07-18T09:51:31.441881+00:00", "componentName": "promptflow/20230710.v2"}, + "warnings": null, "revision": 4, "statusRevision": 2, "runUuid": "555c524f-0fa8-47d7-bf0b-cc6db82ab734", + "parentRunUuid": "01cdc8fe-2bfd-40be-817c-7ae28282e7a7", "rootRunUuid": "01cdc8fe-2bfd-40be-817c-7ae28282e7a7", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:00.4131767", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "c05e0746-e125-4cb3-9213-a8b535eacd79", + "userPuId": "10032000324F7449", "userIdp": null, "userAltSecId": null, "userIss": + "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", "userTenantId": + "00000000-0000-0000-0000-000000000000", "userName": "Honglin Du", "upn": null}, + "lastModifiedUtc": "2023-07-18T09:51:28.1405441+00:00", "duration": "00:00:00.4131767", + "cancelationReason": null, "currentAttemptId": 1, "runId": "3dfd077a-f071-443e-9c4e-d41531710950", + "parentRunId": "fd68a549-2027-4f0f-9f21-adc39cc86c94", "experimentId": "64956f20-fc4f-4b13-aa32-8c52f722b94f", + "status": "Failed", "startTimeUtc": "2023-07-18T09:51:31.2748721+00:00", "endTimeUtc": + "2023-07-18T09:51:31.6880488+00:00", "scheduleId": null, "displayName": "Simple_mock_answer-bulktest-variant_0-fd68a549-2027-4f0f-9f21-adc39cc86c94", + "name": null, "dataContainerId": "dcid.3dfd077a-f071-443e-9c4e-d41531710950", + "description": "", "hidden": false, "runType": "azureml.promptflow.FlowRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": null, "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.flow_id": "6f0d05fd-2cc1-495a-be6d-c60c3f3b1f14", + "azureml.promptflow.flow_type": "Default", "azureml.promptflow.variant_id": + "variant_0", "azureml.promptflow.baseline_variant_run_id": "3dfd077a-f071-443e-9c4e-d41531710950", + "azureml.promptflow.bulk_test_id": "fd68a549-2027-4f0f-9f21-adc39cc86c94", + "azureml.promptflow.flow_experiment_id": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20230710.v2"}, "parameters": {}, "actionUris": {}, "scriptName": null, "target": + null, "uniqueChildRunComputeTargets": [], "tags": {}, "settings": {}, "services": + {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": + null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": + null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + null, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "hello_prompt", "Tool": "hello_prompt", "Comment": null, + "Inputs": {}, "Api": null, "Provider": null, "Connection": null, "Module": + null, "Reduce": false}, {"Name": "echo_my_prompt", "Tool": "echo_my_prompt", + "Comment": null, "Inputs": {"input1": "${hello_prompt.output}"}, "Api": null, + "Provider": null, "Connection": null, "Module": null, "Reduce": false}], "Tools": + [{"Name": "hello_prompt", "Type": "prompt", "Inputs": {"question": 
{"Name": + null, "Type": ["string"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "model_list": null, "Capabilities": + null}}, "Outputs": null, "Description": null, "connection_type": null, "Module": + null, "class_name": null, "Source": null, "LkgCode": "Q: {{question}}", "Code": + "Q: {{question}}", "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "echo_my_prompt", "Type": "python", "Inputs": {"input1": {"Name": + "input1", "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null, "Capabilities": + null}}, "Outputs": null, "Description": null, "connection_type": null, "Module": + null, "class_name": null, "Source": null, "LkgCode": "from promptflow import + tool\nimport time\nimport sys\n# The inputs section will change based on the + arguments of the tool function, after you save the code\n# Adding type to + arguments and return value will help the system show the types properly\n# + Please update the function name/signature per need\n\n\n@tool\ndef my_python_tool(input1: + str) -> str:\n print(f\"@@@ My input data is {input1}...\")\n sys.stderr.write(f\"### + This is an error message {input1}\") \n return \"Prompt: \" + input1\n", + "Code": "from promptflow import tool\nimport time\nimport sys\n# The inputs + section will change based on the arguments of the tool function, after you + save the code\n# Adding type to arguments and return value will help the system + show the types properly\n# Please update the function name/signature per need\n\n\n@tool\ndef + my_python_tool(input1: str) -> str:\n print(f\"@@@ My input data is {input1}...\")\n sys.stderr.write(f\"### + This is an error message {input1}\") \n return \"Prompt: \" + input1\n", + "Function": "my_python_tool", "action_type": null, "provider_config": null, + "function_config": null, "is_builtin": false, "package": null, "package_version": + null}], "Codes": null, "Inputs": {"question": {"Name": null, "Type": "string", + "Default": null, "Description": null, "is_chat_input": false}}, "Outputs": + {"output_prompt": {"Name": null, "Type": "string", "Description": null, "Reference": + "${echo_my_prompt.output}", "evaluation_only": false, "is_chat_output": false}}}, + "jobSpecification": null, "systemSettings": null}' + headers: + connection: + - keep-alive + content-length: + - '10114' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.045' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/3dfd077a-f071-443e-9c4e-d41531710950/logContent + response: + body: + string: '"2023-07-18 09:51:28 +0000 12930 promptflow-runtime INFO [3dfd077a-f071-443e-9c4e-d41531710950] + Receiving submit flow 
request b8f430bb-726a-40cf-a346-22cc0e420e7a: {\"flow_id\": + \"6f0d05fd-2cc1-495a-be6d-c60c3f3b1f14\", \"flow_run_id\": \"3dfd077a-f071-443e-9c4e-d41531710950\", + \"submission_data\": {\"flow\": {\"id\": \"6f0d05fd-2cc1-495a-be6d-c60c3f3b1f14\", + \"name\": \"Simple_mock_answer\", \"nodes\": [{\"name\": \"hello_prompt\", + \"tool\": \"hello_prompt\"}, {\"name\": \"echo_my_prompt\", \"tool\": \"echo_my_prompt\", + \"inputs\": {\"input1\": {\"value\": \"hello_prompt\", \"value_type\": \"NodeReference\", + \"section\": \"output\"}}}], \"inputs\": {\"question\": {\"type\": \"string\"}}, + \"outputs\": {\"output_prompt\": {\"type\": \"string\", \"reference\": {\"value\": + \"echo_my_prompt\", \"value_type\": \"NodeReference\", \"section\": \"output\"}}}, + \"tools\": [{\"name\": \"hello_prompt\", \"type\": \"prompt\", \"inputs\": + {\"question\": {\"type\": [\"string\"]}}, \"code\": \"Q: {{question}}\"}, + {\"name\": \"echo_my_prompt\", \"type\": \"python\", \"inputs\": {\"input1\": + {\"type\": [\"string\"]}}, \"code\": \"from promptflow import tool\\nimport + time\\nimport sys\\n# The inputs section will change based on the arguments + of the tool function, after you save the code\\n# Adding type to arguments + and return value will help the system show the types properly\\n# Please update + the function name/signature per need\\n\\n\\n@tool\\ndef my_python_tool(input1: + str) -> str:\\n print(f\\\"@@@ My input data is {input1}...\\\")\\n sys.stderr.write(f\\\"### + This is an error message {input1}\\\") \\n return \\\"Prompt: \\\" + input1\\n\", + \"function\": \"my_python_tool\"}]}, \"name\": \"Simple_mock_answer-bulktest\", + \"baseline_variant_id\": \"variant_0\", \"bulk_test_id\": \"fd68a549-2027-4f0f-9f21-adc39cc86c94\"}, + \"run_mode\": 3, \"created_by\": {\"user_object_id\": \"c05e0746-e125-4cb3-9213-a8b535eacd79\", + \"user_tenant_id\": \"00000000-0000-0000-0000-000000000000\", \"user_name\": + \"Honglin Du\"}, \"batch_data_input\": {\"data_uri\": \"azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/web_classification_data/versions/1\"}, + \"run_id_to_log_path\": {\"fd68a549-2027-4f0f-9f21-adc39cc86c94\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/PromptflowLogs/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/6f0d05fd-2cc1-495a-be6d-c60c3f3b1f14/fd68a549-2027-4f0f-9f21-adc39cc86c94/fd68a549-2027-4f0f-9f21-adc39cc86c94.txt?sv=2021-10-04&se=2023-07-25T09%3A51%3A27Z&sr=b&sp=rw&sig=**data_scrubbed** + \"3dfd077a-f071-443e-9c4e-d41531710950\": \"https://promptfloweast4063704120.blob.core.windows.net/azureml/PromptflowLogs/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/6f0d05fd-2cc1-495a-be6d-c60c3f3b1f14/fd68a549-2027-4f0f-9f21-adc39cc86c94/flowRuns/3dfd077a-f071-443e-9c4e-d41531710950.txt?sv=2021-10-04&se=2023-07-25T09%3A51%3A27Z&sr=b&sp=rw&sig=**data_scrubbed** + \"app_insights_instrumentation_key\": \"InstrumentationKey=**data_scrubbed**;IngestionEndpoint=https://eastus-6.in.applicationinsights.azure.com/;LiveEndpoint=https://eastus.livediagnostics.monitor.azure.com/\"}\n2023-07-18 + 09:51:28 +0000 12930 promptflow-runtime INFO Init from request using + default credential\n2023-07-18 09:51:28 +0000 12930 promptflow-runtime INFO Initializing + mlclient from request finished in 0.02988982805982232 seconds\n2023-07-18 + 09:51:28 +0000 12930 promptflow-runtime INFO Getting storage account + key finished in 0.09090099297463894 seconds\n2023-07-18 09:51:28 +0000 
12930 + promptflow-runtime WARNING Failed to get storage account key: (AuthorizationFailed) + The client ''74013e41-d17e-462a-8db6-5c0e26c0368c'' with object id ''74013e41-d17e-462a-8db6-5c0e26c0368c'' + does not have authorization to perform action ''Microsoft.MachineLearningServices/workspaces/00000/action'' + over scope ''/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus'' + or the scope is invalid. If access was recently granted, please refresh your + credentials.\nCode: AuthorizationFailed\nMessage: The client ''74013e41-d17e-462a-8db6-5c0e26c0368c'' + with object id ''74013e41-d17e-462a-8db6-5c0e26c0368c'' does not have authorization + to perform action ''Microsoft.MachineLearningServices/workspaces/00000/action'' + over scope ''/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus'' + or the scope is invalid. If access was recently granted, please refresh your + credentials.\n2023-07-18 09:51:29 +0000 12930 promptflow-runtime INFO Workspace + config from mlclient: {''storage_account'': ''promptfloweast4063704120'', + ''mt_service_endpoint'': ''https://eastus.api.azureml.ms'', ''resource_group'': + ''promptflow'', ''subscription_id'': ''96aede12-2f73-41cb-b983-6d11a904839b'', + ''workspace_name'': ''promptflow-eastus'', ''workspace_id'': ''3e123da1-f9a5-4c91-9234-8d9ffbb39ff5''}\n2023-07-18 + 09:51:29 +0000 12930 promptflow-runtime INFO Getting workspace id finished + in 0.26978495717048645 seconds\n2023-07-18 09:51:29 +0000 12930 promptflow-runtime + INFO Starting to check process 15853 status\n2023-07-18 09:51:29 +0000 12930 + promptflow-runtime INFO Start checking run status for bulk run 3dfd077a-f071-443e-9c4e-d41531710950\n2023-07-18 + 09:51:29 +0000 15853 promptflow-runtime INFO [12930--15853] Start processing + flow......\n2023-07-18 09:51:29 +0000 15853 promptflow-runtime INFO Using + AzureMLRunStorage with compute identity.\n2023-07-18 09:51:29 +0000 12930 + promptflow-runtime INFO Running .get_storage_from_config_with_retry + at 0x7fdbcdefd790>, 3 more tries to go.\n2023-07-18 09:51:29 +0000 15853 + promptflow-runtime INFO [diagnostic] Token expire on: 2023-07-19 03:02:30, + oid: 74013e41-d17e-462a-8db6-5c0e26c0368c, scp: None\n2023-07-18 09:51:29 + +0000 15853 promptflow-runtime INFO [diagnostic] Token expire on: 2023-07-19 + 08:42:04, oid: 74013e41-d17e-462a-8db6-5c0e26c0368c, scp: None\n2023-07-18 + 09:51:29 +0000 12930 promptflow-runtime INFO Using AzureMLRunStorage + with compute identity.\n2023-07-18 09:51:29 +0000 15853 promptflow-runtime + INFO Set token diagnostic finished in 0.02079575788229704 seconds\n2023-07-18 + 09:51:29 +0000 12930 promptflow-runtime INFO [diagnostic] Token expire + on: 2023-07-19 03:02:30, oid: 74013e41-d17e-462a-8db6-5c0e26c0368c, scp: None\n2023-07-18 + 09:51:29 +0000 15853 promptflow-runtime INFO Setting mlflow tracking + uri...\n2023-07-18 09:51:29 +0000 12930 promptflow-runtime INFO [diagnostic] + Token expire on: 2023-07-19 08:42:04, oid: 74013e41-d17e-462a-8db6-5c0e26c0368c, + scp: None\n2023-07-18 09:51:29 +0000 12930 promptflow-runtime INFO Set + token diagnostic finished in 0.022056567948311567 seconds\n2023-07-18 09:51:29 + +0000 12930 promptflow-runtime INFO Setting mlflow tracking uri...\n2023-07-18 + 09:51:29 +0000 15853 promptflow-runtime INFO Validating ''AzureML Data + Scientist'' user authentication...\n2023-07-18 09:51:29 +0000 12930 
promptflow-runtime + INFO Validating ''AzureML Data Scientist'' user authentication...\n2023-07-18 + 09:51:29 +0000 12930 promptflow-runtime INFO Successfully validated + ''AzureML Data Scientist'' user authentication.\n2023-07-18 09:51:29 +0000 15853 + promptflow-runtime INFO Successfully validated ''AzureML Data Scientist'' + user authentication.\n2023-07-18 09:51:29 +0000 12930 promptflow-runtime + INFO Initialized table client for AzureMLRunTracker.\n2023-07-18 09:51:29 + +0000 12930 promptflow-runtime INFO Initialized blob service client + for AzureMLRunTracker.\n2023-07-18 09:51:29 +0000 12930 promptflow-runtime + INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-07-18 + 09:51:29 +0000 12930 promptflow-runtime INFO Start checking run status + for run 3dfd077a-f071-443e-9c4e-d41531710950\n2023-07-18 09:51:29 +0000 15853 + promptflow-runtime INFO Initialized table client for AzureMLRunTracker.\n2023-07-18 + 09:51:30 +0000 15853 promptflow-runtime INFO Initialized blob service + client for AzureMLRunTracker.\n2023-07-18 09:51:30 +0000 15853 promptflow-runtime + INFO Setting mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-07-18 + 09:51:30 +0000 15853 promptflow-runtime INFO Resolved 3 lines of data + from uri: azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/web_classification_data/versions/1\n2023-07-18 + 09:51:30 +0000 15853 promptflow-runtime INFO Resolve data from url finished + in 0.8368151811882854 seconds\n2023-07-18 09:51:30 +0000 15853 promptflow-runtime + INFO Start execute request: 3dfd077a-f071-443e-9c4e-d41531710950 in dir + requests/3dfd077a-f071-443e-9c4e-d41531710950...\n2023-07-18 09:51:30 +0000 15853 + execution ERROR Submission request failed. 
Exception: Input ''question'' + in line 0 is not provided for flow ''Simple_mock_answer''.\nTraceback (most + recent call last):\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 243, in exec_request_raw\n return self._route_request_raw(raw_request, + raise_ex=raise_ex)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 316, in _route_request_raw\n return self._exec_batch_request(\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 393, in _exec_batch_request\n run_infos = self._exec_batch_request_inner(\n File + \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/executor.py\", + line 654, in _exec_batch_request_inner\n batch_inputs = FlowRequestValidator.ensure_batch_inputs_type(batch_request.flow, + batch_request.batch_inputs)\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_request_validator.py\", + line 99, in ensure_batch_inputs_type\n return [cls.ensure_flow_inputs_type(flow, + inputs, idx) for idx, inputs in enumerate(batch_inputs)]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_request_validator.py\", + line 99, in \n return [cls.ensure_flow_inputs_type(flow, inputs, + idx) for idx, inputs in enumerate(batch_inputs)]\n File \"/azureml-envs/prompt-flow/runtime/lib/python3.9/site-packages/promptflow/executor/flow_request_validator.py\", + line 192, in ensure_flow_inputs_type\n raise InputNotFound(\npromptflow.executor.flow_request_validator.InputNotFound: + Input ''question'' in line 0 is not provided for flow ''Simple_mock_answer''.\n2023-07-18 + 09:51:30 +0000 15853 execution INFO Updated run ''3dfd077a-f071-443e-9c4e-d41531710950'' + as failed in run info.\n2023-07-18 09:51:31 +0000 15853 promptflow-runtime + INFO Starting the aml run ''3dfd077a-f071-443e-9c4e-d41531710950''...\n2023-07-18 + 09:51:31 +0000 15853 promptflow-runtime INFO Ending the aml run ''3dfd077a-f071-443e-9c4e-d41531710950'' + with status ''Failed''...\n2023-07-18 09:51:31 +0000 15853 execution INFO Updated + run ''3dfd077a-f071-443e-9c4e-d41531710950'' as failed in run history.\n2023-07-18 + 09:51:31 +0000 15853 promptflow-runtime INFO Starting the aml run ''fd68a549-2027-4f0f-9f21-adc39cc86c94''...\n2023-07-18 + 09:51:32 +0000 15853 promptflow-runtime INFO Ending the aml run ''fd68a549-2027-4f0f-9f21-adc39cc86c94'' + with status ''Completed''...\n2023-07-18 09:51:32 +0000 15853 execution INFO Updated + bulk test run ''fd68a549-2027-4f0f-9f21-adc39cc86c94'' as Completed in run + history.\n2023-07-18 09:51:32 +0000 12930 promptflow-runtime INFO Process + 15853 finished\n2023-07-18 09:51:32 +0000 12930 promptflow-runtime WARNING Hit + exception when execute request: \n{''message'': \"Input ''question'' in line + 0 is not provided for flow ''Simple_mock_answer''.\", ''messageFormat'': '''', + ''messageParameters'': {}, ''referenceCode'': ''Executor'', ''code'': ''UserError'', + ''innerError'': {''code'': ''ValidationError'', ''innerError'': {''code'': + ''InvalidFlowRequest'', ''innerError'': {''code'': ''InputNotFound'', ''innerError'': + None}}}}\n2023-07-18 09:51:32 +0000 12930 promptflow-runtime ERROR Submit + flow request failed Code: 400 Exception type: + InnerException type: InputNotFound Exception type hierarchy: UserError/ValidationError/InvalidFlowRequest/InputNotFound\n2023-07-18 + 09:51:49 
+0000 12930 promptflow-runtime INFO Running .get_run_status_with_retry + at 0x7fdbcdefd940>, 3 more tries to go.\n2023-07-18 09:51:49 +0000 12930 + promptflow-runtime INFO Run 3dfd077a-f071-443e-9c4e-d41531710950 is in + progress, Execution status: Failed\n"' + headers: + connection: + - keep-alive + content-length: + - '13452' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.525' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_invalid_run_logs.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_invalid_run_logs.yaml new file mode 100644 index 00000000000..07ba0fc3c0c --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_invalid_run_logs.yaml @@ -0,0 +1,146 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.037' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", 
+ "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.076' + status: + code: 200 + message: OK +- request: + body: '{"runId": "non_exist_run", "selectRunMetadata": true, "selectRunDefinition": + true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"error": {"code": "UserError", "severity": null, "message": "Run runId=non_exist_run + was not found", "messageFormat": "Run {runId} was not found", "messageParameters": + {"runId": "runId=non_exist_run"}, "referenceCode": null, "detailsUri": null, + "target": null, "details": [], "innerError": {"code": "NotFoundError", "innerError": + null}, "debugInfo": null, "additionalInfo": null}, "correlation": {"operation": + "8a1f67982caf4cea98f7cbc98fe8a4fe", "request": "be7c31e90a252e03"}, "environment": + "eastus", "location": "eastus", "time": "2023-10-18T09:12:57.9413107+00:00", + "componentName": "run-history", "statusCode": 404}' + headers: + connection: + - keep-alive + content-length: + - '777' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.050' + status: + code: 404 + message: Run runId=68cd54bf-1af8-4d5c-8c17-556074765617 was not found +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_run_logs.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_run_logs.yaml new file mode 100644 index 00000000000..3f815f6b068 --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_stream_run_logs.yaml @@ -0,0 +1,599 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.035' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.072' + status: + code: 200 + message: OK +- request: + body: '{"runId": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "selectRunMetadata": + true, "selectRunDefinition": true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1687749782, "rootRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "createdUtc": 
"2023-06-26T03:23:02.3017884+00:00", "createdBy": {"userObjectId": + "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "userPuId": "100320007E5EB49B", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Zhen + Ruan", "upn": null}, "userId": "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "token": + null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": + 96, "statusRevision": 2, "runUuid": "fab99b4a-07c2-4c77-8ee3-3d3a47102b4a", + "parentRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", "rootRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:41.1688602", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T09:13:00.6007617+00:00", "duration": + "00:00:41.1688602", "cancelationReason": null, "currentAttemptId": 1, "runId": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "parentRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "experimentId": "ad8dde3a-73f1-49bc-ae71-c7c9b2dc1b9f", "status": "Completed", + "startTimeUtc": "2023-06-26T03:23:05.7657269+00:00", "endTimeUtc": "2023-06-26T03:23:46.9345871+00:00", + "scheduleId": null, "displayName": "test_display_name_8f5e92cd-3249-463a-9e51-3b4c6dd5d6ee", + "name": null, "dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "description": "test_description_8f5e92cd-3249-463a-9e51-3b4c6dd5d6ee", "hidden": + false, "runType": "azureml.promptflow.EvaluationRun", "runTypeV2": {"orchestrator": + null, "traits": [], "attribution": null, "computeType": "MIR_v2"}, "properties": + {"azureml.promptflow.flow_id": "QnARelevanceEvaluation", "azureml.promptflow.flow_name": + "QnA Relevance Evaluation", "azureml.promptflow.flow_type": "Evaluation", + "azureml.promptflow.source_flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "azureml.promptflow.baseline_variant_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "azureml.promptflow.variant_ids": "", "azureml.promptflow.bulk_test_id": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "azureml.promptflow.flow_experiment_id": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20230614.v1", "azureml.promptflow.total_tokens": "26680"}, "parameters": + {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {"test_tag": "8f5e92cd-3249-463a-9e51-3b4c6dd5d6ee", "hod": "1"}, + "settings": {}, "services": {}, "inputDatasets": [], "outputDatasets": [], + "runDefinition": null, "jobSpecification": null, "primaryMetricName": null, + "createdFrom": null, "cancelUri": null, "completeUri": null, "diagnosticsUri": + null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + null, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "relevance_score", "Tool": "compute_relevance_score", + "Comment": null, "Inputs": {"question": "${flow.question}", "context": "${flow.context}", + "answer": "${flow.answer}", "max_tokens": 
"256", "deployment_name": "gpt-35-turbo", + "temperature": "0.0"}, "Api": "chat", "Provider": "AzureOpenAI", "Connection": + "azure_open_ai_connection", "Module": "promptflow.tools.aoai", "Reduce": false}, + {"Name": "concat_scores", "Tool": "concat_results", "Comment": null, "Inputs": + {"relevance_score": "${relevance_score.output}"}, "Api": null, "Provider": + null, "Connection": null, "Module": null, "Reduce": false}, {"Name": "aggregate_variants_results", + "Tool": "aggregate_variants_results", "Comment": null, "Inputs": {"results": + "${concat_scores.output}", "line_number": "${flow.line_number}", "variant_id": + "${flow.variant_id}"}, "Api": null, "Provider": null, "Connection": null, + "Module": null, "Reduce": true}], "Tools": [{"Name": "compute_relevance_score", + "Type": "llm", "Inputs": {"context": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "question": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "answer": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}}, "Outputs": null, "Description": "This is a llm + tool", "connection_type": null, "Module": null, "class_name": null, "Source": + "compute_relevance_score.jinja2", "LkgCode": null, "Code": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "concat_results", "Type": "python", "Inputs": {"relevance_score": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "concat_results.py", "LkgCode": null, "Code": "from promptflow + import tool\nimport numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: + str):\n\n load_list = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "Function": + "concat_results", "action_type": null, "provider_config": null, "function_config": + null, "is_builtin": false, "package": null, "package_version": null}, {"Name": + "aggregate_variants_results", "Type": "python", "Inputs": {"variant_id": {"Name": + null, "Type": ["object"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "model_list": null}, "line_number": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}, "results": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, 
"enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "aggregate_variants_results.py", "LkgCode": null, "Code": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n", "Function": "aggregate_variants_results", "action_type": + null, "provider_config": null, "function_config": null, "is_builtin": false, + "package": null, "package_version": null}], "Codes": {"compute_relevance_score.jinja2": + "System:\nYou are an AI assistant. You will be given the definition of an + evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. 
The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "concat_results.py": "from promptflow import tool\nimport + numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: str):\n\n load_list + = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "aggregate_variants_results.py": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n"}, "Inputs": {"question": {"Name": null, "Type": 
"string", + "Default": null, "Description": null, "is_chat_input": false}, "context": + {"Name": null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false}, "answer": {"Name": null, "Type": "string", "Default": null, "Description": + null, "is_chat_input": false}, "line_number": {"Name": null, "Type": "int", + "Default": null, "Description": null, "is_chat_input": false}, "variant_id": + {"Name": null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false}}, "Outputs": {"gpt_relevance": {"Name": null, "Type": "object", "Description": + null, "Reference": "${concat_scores.output.gpt_relevance}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '21284' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.044' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Type: + - application/json + User-Agent: + - promptflow-sdk/0.0.1 azsdk-python-azuremachinelearningdesignerserviceclient/unknown + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://eastus.api.azureml.ms/flow/api/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/BulkRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/logContent + response: + body: + string: '"2023-06-26 03:23:03 +0000 126 promptflow-runtime INFO [4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74] + Receiving flow request 1a7fc96a-f497-44fc-beea-9347a1bb4c95: {\"flow_id\": + \"42f7ee16-2243-475b-af21-e8fabd4abd15\", \"flow_run_id\": \"4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74\", + \"submission_data\": {\"flow\": {\"id\": \"42f7ee16-2243-475b-af21-e8fabd4abd15\", + \"name\": \"QnA Relevance Evaluation\", \"nodes\": [{\"name\": \"relevance_score\", + \"tool\": \"compute_relevance_score\", \"inputs\": {\"question\": {\"value\": + \"question\", \"value_type\": \"FlowInput\", \"prefix\": \"flow.\"}, \"context\": + {\"value\": \"context\", \"value_type\": \"FlowInput\", \"prefix\": \"flow.\"}, + \"answer\": {\"value\": \"answer\", \"value_type\": \"FlowInput\", \"prefix\": + \"flow.\"}, \"max_tokens\": {\"value\": \"256\", \"value_type\": \"Literal\"}, + \"deployment_name\": {\"value\": \"gpt-35-turbo\", \"value_type\": \"Literal\"}, + \"temperature\": {\"value\": \"0.0\", \"value_type\": \"Literal\"}}, \"api\": + \"chat\", \"provider\": \"AzureOpenAI\", \"module\": \"promptflow.tools.aoai\", + \"connection\": \"azure_open_ai_connection\"}, {\"name\": \"concat_scores\", + \"tool\": \"concat_results\", \"inputs\": {\"relevance_score\": {\"value\": + \"relevance_score\", \"value_type\": \"NodeReference\", \"section\": \"output\"}}}, + {\"name\": \"aggregate_variants_results\", \"tool\": \"aggregate_variants_results\", + \"inputs\": {\"results\": {\"value\": \"concat_scores\", \"value_type\": \"NodeReference\", + \"section\": \"output\"}, \"line_number\": {\"value\": \"line_number\", \"value_type\": + \"FlowInput\", \"prefix\": \"flow.\"}, \"variant_id\": {\"value\": \"variant_id\", + \"value_type\": \"FlowInput\", \"prefix\": \"flow.\"}}, \"reduce\": true}], + \"inputs\": {\"question\": {\"type\": 
\"string\"}, \"context\": {\"type\": + \"string\"}, \"answer\": {\"type\": \"string\"}, \"line_number\": {\"type\": + \"int\"}, \"variant_id\": {\"type\": \"string\"}}, \"outputs\": {\"gpt_relevance\": + {\"type\": \"object\", \"reference\": {\"value\": \"concat_scores\", \"value_type\": + \"NodeReference\", \"section\": \"output\", \"property\": \"gpt_relevance\"}}}, + \"tools\": [{\"name\": \"compute_relevance_score\", \"type\": \"llm\", \"inputs\": + {\"context\": {\"type\": [\"string\"]}, \"question\": {\"type\": [\"string\"]}, + \"answer\": {\"type\": [\"string\"]}}, \"description\": \"This is a llm tool\", + \"source\": \"compute_relevance_score.jinja2\", \"code\": \"System:\\nYou + are an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\\n\\nUser:\\nRelevance measures how well the answer addresses the + main aspects of the question, based on the context. Consider whether all and + only the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\\nOne star: the answer + completely lacks relevance\\nTwo stars: the answer mostly lacks relevance\\nThree + stars: the answer is partially relevant\\nFour stars: the answer is mostly + relevant\\nFive stars: the answer has perfect relevance\\n\\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\\n\\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\\nquestion: What field did Marie Curie excel in?\\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\\nstars: 1\\n\\ncontext: The Beatles were an English rock + band formed in Liverpool in 1960, and they are widely regarded as the most + influential music band in history.\\nquestion: Where were The Beatles formed?\\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\\nstars: 2\\n\\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\\nquestion: What are the main + goals of Perseverance Mars rover mission?\\nanswer: The Perseverance Mars + rover mission focuses on searching for signs of ancient life on Mars.\\nstars: + 3\\n\\ncontext: The Mediterranean diet is a commonly recommended dietary plan + that emphasizes fruits, vegetables, whole grains, legumes, lean proteins, + and healthy fats. Studies have shown that it offers numerous health benefits, + including a reduced risk of heart disease and improved cognitive health.\\nquestion: + What are the main components of the Mediterranean diet?\\nanswer: The Mediterranean + diet primarily consists of fruits, vegetables, whole grains, and legumes.\\nstars: + 4\\n\\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\\nquestion: What are the main attractions of the Queen''s Royal + Castle?\\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\\nstars: 5\\n\\ncontext: {{context}}\\nquestion: {{question}}\\nanswer: + {{answer}}\\nstars:\"}, {\"name\": \"concat_results\", \"type\": \"python\", + \"inputs\": {\"relevance_score\": {\"type\": [\"string\"]}}, \"source\": \"concat_results.py\", + \"code\": \"from promptflow import tool\\nimport numpy as np\\nimport re\\n\\n\\n@tool\\ndef + concat_results(relevance_score: str):\\n\\n load_list = [{''name'': ''gpt_relevance'', + ''score'': relevance_score}]\\n score_list = []\\n errors = []\\n for + item in load_list:\\n try:\\n score = item[\\\"score\\\"]\\n match + = re.search(r''\\\\d'', score)\\n if match:\\n score + = match.group()\\n score = float(score)\\n except Exception + as e:\\n score = np.nan\\n errors.append({\\\"name\\\": + item[\\\"name\\\"], \\\"msg\\\": str(e), \\\"data\\\": item[\\\"score\\\"]})\\n score_list.append({\\\"name\\\": + item[\\\"name\\\"], \\\"score\\\": score})\\n\\n variant_level_result = + {}\\n for item in score_list:\\n item_name = str(item[\\\"name\\\"])\\n variant_level_result[item_name] + = item[\\\"score\\\"]\\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\\\"score\\\"] > 3 else 0\\n return variant_level_result\\n\", + \"function\": \"concat_results\"}, {\"name\": \"aggregate_variants_results\", + \"type\": \"python\", \"inputs\": {\"variant_id\": {\"type\": [\"object\"]}, + \"line_number\": {\"type\": [\"object\"]}, \"results\": {\"type\": [\"object\"]}}, + \"source\": \"aggregate_variants_results.py\", \"code\": \"from typing import + List\\nfrom promptflow import tool, log_metric\\nimport numpy as np\\n\\n\\n@tool\\ndef + aggregate_variants_results(variant_id: List[int], line_number: List[int], + results: List[dict]):\\n aggregate_results = {}\\n for index in range(len(line_number)):\\n result + = results[index]\\n variant = variant_id[index]\\n if variant + not in aggregate_results.keys():\\n aggregate_results[variant] + = {}\\n item_result = aggregate_results[variant]\\n for name, + value in result.items():\\n if name not in item_result.keys():\\n item_result[name] + = []\\n try:\\n float_val = float(value)\\n except + Exception:\\n float_val = np.nan\\n item_result[name].append(float_val)\\n\\n for + name, value in aggregate_results.items():\\n variant_id = name\\n aggr_item + = aggregate_results[name]\\n for name, value in aggr_item.items():\\n metric_name + = name\\n aggr_item[name] = np.nanmean(value)\\n if + ''pass_rate'' in metric_name:\\n metric_name = metric_name + + \\\"(%)\\\"\\n aggr_item[name] = aggr_item[name] * 100.0\\n aggr_item[name] + = round(aggr_item[name], 2)\\n log_metric(metric_name, aggr_item[name], + variant_id=variant_id)\\n\\n return aggregate_results\\n\", \"function\": + \"aggregate_variants_results\"}]}, \"connections\": \"**data_scrubbed**\", + \"bulk_test_flow_run_ids\": [\"8a20d76d-0488-4deb-aae2-c4d99d30a1ed\", \"8a20d76d-0488-4deb-aae2-c4d99d30a1ed_9bafbd52-da0f-44ca-b40c-b0ce912f083c_variant_0\"], + \"bulk_test_flow_id\": \"42f7ee16-2243-475b-af21-e8fabd4abd15\", \"bulk_test_id\": + \"9bafbd52-da0f-44ca-b40c-b0ce912f083c\", \"inputs_mapping\": {\"question\": + \"data.url\", \"answer\": 
\"data.answer\", \"context\": \"data.evidence\"}}, + \"run_mode\": 4, \"created_by\": {\"user_object_id\": \"8471f65f-aa0e-4cde-b219-0ba7a1c148bf\", + \"user_tenant_id\": \"00000000-0000-0000-0000-000000000000\", \"user_name\": + \"Zhen Ruan\"}, \"bulk_test_data_input\": {\"data_uri\": \"azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/ae8b5183-dea9-4487-98a4-f8ed4fb934e7/versions/1.0.0\"}, + \"run_id_to_log_path\": {\"4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74\": \"https://promptfloweast4063704120.blob.core.windows.net/flowrun-logs/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/42f7ee16-2243-475b-af21-e8fabd4abd15/9bafbd52-da0f-44ca-b40c-b0ce912f083c/evaluationRuns/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74.txt?skoid=55b92eba-d7c7-4afd-ab76-7bb1cd345283&sktid=00000000-0000-0000-0000-000000000000&skt=2023-06-26T03%3A23%3A01Z&ske=2023-07-03T03%3A23%3A01Z&sks=b&skv=2021-10-04&sv=2021-10-04&se=2023-07-03T03%3A23%3A01Z&sr=b&sp=rw&sig=**data_scrubbed**\n2023-06-26 + 03:23:03 +0000 126 promptflow-runtime INFO Setup logger context.\n2023-06-26 + 03:23:03 +0000 126 promptflow-runtime INFO Init from request using + default credential\n2023-06-26 03:23:03 +0000 126 promptflow-runtime INFO Initializing + mlclient from request finished in 0.02641195198521018 seconds\n2023-06-26 + 03:23:03 +0000 126 promptflow-runtime INFO Getting storage account + key finished in 0.06218615802936256 seconds\n2023-06-26 03:23:03 +0000 126 + promptflow-runtime WARNING Failed to get storage account key: (AuthorizationFailed) + The client ''74013e41-d17e-462a-8db6-5c0e26c0368c'' with object id ''74013e41-d17e-462a-8db6-5c0e26c0368c'' + does not have authorization to perform action ''Microsoft.MachineLearningServices/workspaces/00000/action'' + over scope ''/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus'' + or the scope is invalid. If access was recently granted, please refresh your + credentials.\nCode: AuthorizationFailed\nMessage: The client ''74013e41-d17e-462a-8db6-5c0e26c0368c'' + with object id ''74013e41-d17e-462a-8db6-5c0e26c0368c'' does not have authorization + to perform action ''Microsoft.MachineLearningServices/workspaces/00000/action'' + over scope ''/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus'' + or the scope is invalid. 
If access was recently granted, please refresh your + credentials.\n2023-06-26 03:23:03 +0000 126 promptflow-runtime INFO Starting + to check process 5498 status\n2023-06-26 03:23:03 +0000 126 promptflow-runtime + INFO Start checking run status for bulk run 9bafbd52-da0f-44ca-b40c-b0ce912f083c\n2023-06-26 + 03:23:03 +0000 5498 promptflow-runtime INFO [126--5498] Start processing + flow......\n2023-06-26 03:23:03 +0000 126 promptflow-runtime INFO Running + .get_storage_from_config_with_retry + at 0x7f5b35a7f670>, 3 more tries to go.\n2023-06-26 03:23:03 +0000 5498 + promptflow-runtime INFO Using AzureMLRunStorage with compute identity.\n2023-06-26 + 03:23:03 +0000 5498 promptflow-runtime INFO [diagnostic] Token expire + on: 2023-06-27 02:48:23, oid: 74013e41-d17e-462a-8db6-5c0e26c0368c, scp: None\n2023-06-26 + 03:23:03 +0000 126 promptflow-runtime INFO Start checking run status + for run 9bafbd52-da0f-44ca-b40c-b0ce912f083c\n2023-06-26 03:23:03 +0000 5498 + promptflow-runtime INFO [diagnostic] Token expire on: 2023-06-27 02:48:24, + oid: 74013e41-d17e-462a-8db6-5c0e26c0368c, scp: None\n2023-06-26 03:23:03 + +0000 5498 promptflow-runtime INFO Set token diagnostic finished in + 0.04067643382586539 seconds\n2023-06-26 03:23:03 +0000 5498 promptflow-runtime + INFO Setting mlflow tracking uri...\n2023-06-26 03:23:03 +0000 5498 + promptflow-runtime INFO Validating ''AzureML Data Scientist'' user authentication...\n2023-06-26 + 03:23:04 +0000 5498 promptflow-runtime INFO Successfully validated + ''AzureML Data Scientist'' user authentication.\n2023-06-26 03:23:04 +0000 5498 + execution.bulk INFO Initialized table client for AzureMLRunTracker.\n2023-06-26 + 03:23:04 +0000 5498 execution.bulk INFO Initialized blob service + client for AzureMLRunTracker.\n2023-06-26 03:23:04 +0000 5498 execution.bulk INFO Setting + mlflow tracking uri to ''azureml://eastus.api.azureml.ms/mlflow/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/promptflow-eastus''\n2023-06-26 + 03:23:05 +0000 5498 promptflow-runtime INFO Resolved 20 lines of data + from uri: azureml:/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/3e123da1-f9a5-4c91-9234-8d9ffbb39ff5/ae8b5183-dea9-4487-98a4-f8ed4fb934e7/versions/1.0.0\n2023-06-26 + 03:23:05 +0000 5498 promptflow-runtime INFO Resolve data from url finished + in 0.6863185481633991 seconds\n2023-06-26 03:23:05 +0000 5498 promptflow-runtime + INFO Start execute request: 4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74 in dir + requests/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74...\n2023-06-26 03:23:05 +0000 5498 + execution.bulk INFO Starting the aml run ''4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74''...\n2023-06-26 + 03:23:06 +0000 5498 execution.bulk INFO Finished 4 / 40 lines.\n2023-06-26 + 03:23:06 +0000 5498 execution.bulk INFO Finished 8 / 40 lines.\n2023-06-26 + 03:23:07 +0000 5498 execution.bulk INFO Finished 12 / 40 lines.\n2023-06-26 + 03:23:07 +0000 5498 execution.bulk INFO Finished 16 / 40 lines.\n2023-06-26 + 03:23:07 +0000 5498 execution.bulk INFO Finished 20 / 40 lines.\n2023-06-26 + 03:23:07 +0000 5498 execution.bulk INFO Finished 24 / 40 lines.\n2023-06-26 + 03:23:07 +0000 5498 execution.bulk INFO Finished 28 / 40 lines.\n2023-06-26 + 03:23:07 +0000 5498 execution.bulk INFO Finished 32 / 40 lines.\n2023-06-26 + 03:23:07 +0000 5498 execution.bulk INFO Finished 36 / 40 lines.\n2023-06-26 + 03:23:23 +0000 126 promptflow-runtime INFO 
Running .get_run_status_with_retry + at 0x7f5b35b9f9d0>, 3 more tries to go.\n2023-06-26 03:23:36 +0000 5498 + execution.bulk WARNING [relevance_score in line 12] stderr> Exception + occurs: Timeout: Request timed out: HTTPSConnectionPool(host=''gpt-test-eus.openai.azure.com'', + port=443): Read timed out. (read timeout=30)\n2023-06-26 03:23:36 +0000 5498 + execution.bulk WARNING [relevance_score in line 12] stderr> Timeout #0, + but no Retry-After header, Back off 8.0 seconds for retry.\n2023-06-26 03:23:36 + +0000 126 promptflow-runtime WARNING , Retrying + in 6 seconds...\n2023-06-26 03:23:36 +0000 126 promptflow-runtime INFO Running + .get_run_status_with_retry + at 0x7f5b35b9f9d0>, 2 more tries to go.\n2023-06-26 03:23:44 +0000 5498 + execution.bulk INFO Finished 40 / 40 lines.\n2023-06-26 03:23:44 +0000 5498 + execution.bulk INFO Executing reduce nodes...\n2023-06-26 03:23:44 + +0000 5498 execution.bulk INFO Finish executing reduce nodes.\n2023-06-26 + 03:23:44 +0000 5498 execution.bulk INFO Aggregating child run errors + for root run ''4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74'' if child run has error...\n2023-06-26 + 03:23:46 +0000 5498 execution.bulk INFO Upload metrics for run + 4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74 finished in 1.8221911741420627 seconds\n2023-06-26 + 03:23:46 +0000 5498 execution.bulk INFO Successfully write run + properties {\"azureml.promptflow.total_tokens\": 26680} with run id ''4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74''\n2023-06-26 + 03:23:46 +0000 5498 execution.bulk INFO Upload RH properties for + run 4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74 finished in 0.0726369780022651 seconds\n2023-06-26 + 03:23:46 +0000 5498 execution.bulk INFO Ending the aml run ''4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74'' + with status ''Completed''...\n2023-06-26 03:23:47 +0000 126 promptflow-runtime + INFO Process 5498 finished\n2023-06-26 03:23:47 +0000 126 promptflow-runtime + INFO [126] Child process finished!\n2023-06-26 03:23:47 +0000 126 + promptflow-runtime INFO [4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74] End processing + flow\n2023-06-26 03:23:52 +0000 126 promptflow-runtime WARNING , Retrying in 12 seconds...\n"' + headers: + connection: + - keep-alive + content-length: + - '17743' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.604' + status: + code: 200 + message: OK +version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_tools_json_ignored.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_tools_json_ignored.yaml new file mode 100644 index 00000000000..aecb037a37e --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_tools_json_ignored.yaml @@ -0,0 +1,452 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": 
"/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.028' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.048' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": 
"workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.075' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.111' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:05:19 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '379' + content-md5: + - lI/pz9jzTQ7Td3RHPL7y7w== + content-type: + - application/octet-stream + last-modified: + - Tue, 25 Jul 2023 06:21:56 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Tue, 25 Jul 2023 06:21:56 GMT + x-ms-meta-name: + - e0068493-1fbe-451c-96b3-cf6b013632ad + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - 1f73938f-def0-4a75-b4d0-6b07a2378e1b + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - 
azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:05:20 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/webClassification3.jsonl + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}' + headers: + cache-control: + - no-cache + content-length: + - '1227' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.185' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '0' + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: POST + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore/listSecrets + response: + body: + string: '{"secretsType": "AccountKey", "key": "dGhpcyBpcyBmYWtlIGtleQ=="}' + headers: + cache-control: + - no-cache + content-length: + - '134' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + 
x-request-time: + - '0.126' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:05:23 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/LocalUpload/000000000000000000000000000000000000/flow_with_dict_input/flow.dag.yaml + response: + body: + string: '' + headers: + accept-ranges: + - bytes + content-length: + - '245' + content-md5: + - NJ+RHeG4z7emGpIQkHVUaA== + content-type: + - application/octet-stream + last-modified: + - Thu, 17 Aug 2023 10:30:41 GMT + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + vary: + - Origin + x-ms-blob-type: + - BlockBlob + x-ms-creation-time: + - Thu, 17 Aug 2023 10:30:41 GMT + x-ms-meta-name: + - 45e1c95d-7502-4635-a7f0-f46ca730e5e1 + x-ms-meta-upload_status: + - completed + x-ms-meta-version: + - '1' + x-ms-version: + - '2023-08-03' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/xml + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - azsdk-python-storage-blob/12.18.0 Python/3.10.13 (Windows-10-10.0.22621-SP0) + x-ms-date: + - Wed, 18 Oct 2023 10:05:24 GMT + x-ms-version: + - '2023-08-03' + method: HEAD + uri: https://fake_account_name.blob.core.windows.net/fake-container-name/az-ml-artifacts/000000000000000000000000000000000000/flow_with_dict_input/flow.dag.yaml + response: + body: + string: '' + headers: + server: + - Windows-Azure-Blob/1.0 Microsoft-HTTPAPI/2.0 + transfer-encoding: + - chunked + vary: + - Origin + x-ms-error-code: + - BlobNotFound + x-ms-version: + - '2023-08-03' + status: + code: 404 + message: The specified blob does not exist. 
+version: 1 diff --git a/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_update_run.yaml b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_update_run.yaml new file mode 100644 index 00000000000..a7adb31a05b --- /dev/null +++ b/src/promptflow/tests/test_configs/recordings/test_run_operations_TestFlowRun_test_update_run.yaml @@ -0,0 +1,514 @@ +interactions: +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000 + response: + body: + string: '{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000", + "name": "00000", "type": "Microsoft.MachineLearningServices/workspaces", "location": + "eastus", "tags": {}, "etag": null, "kind": "Default", "sku": {"name": "Basic", + "tier": "Basic"}, "properties": {"discoveryUrl": "https://eastus.api.azureml.ms/discovery"}}' + headers: + cache-control: + - no-cache + content-length: + - '3519' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.030' + status: + code: 200 + message: OK +- request: + body: null + headers: + Accept: + - application/json + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + User-Agent: + - promptflow-sdk/0.0.1 azure-ai-ml/1.10.0 azsdk-python-mgmt-machinelearningservices/0.1.0 + Python/3.10.13 (Windows-10-10.0.22621-SP0) + method: GET + uri: https://management.azure.com/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores?count=30&isDefault=true&orderByAsc=false + response: + body: + string: '{"value": [{"id": "/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/datastores/workspaceblobstore", + "name": "workspaceblobstore", "type": "Microsoft.MachineLearningServices/workspaces/datastores", + "properties": {"description": null, "tags": null, "properties": null, "isDefault": + true, "credentials": {"credentialsType": "AccountKey"}, "intellectualProperty": + null, "subscriptionId": "00000000-0000-0000-0000-000000000000", "resourceGroup": + "00000", "datastoreType": "AzureBlob", "accountName": "fake_account_name", + "containerName": "fake-container-name", "endpoint": "core.windows.net", "protocol": + "https", "serviceDataAccessAuthIdentity": "WorkspaceSystemAssignedIdentity"}, + "systemData": {"createdAt": "2023-04-08T02:53:06.5886442+00:00", "createdBy": + "779301c0-18b2-4cdc-801b-a0a3368fee0a", "createdByType": "Application", "lastModifiedAt": + "2023-04-08T02:53:07.521127+00:00", "lastModifiedBy": "779301c0-18b2-4cdc-801b-a0a3368fee0a", + "lastModifiedByType": "Application"}}]}' + headers: + cache-control: + - no-cache + content-length: + - '1372' + content-type: + - application/json; charset=utf-8 + expires: + - '-1' + pragma: + - no-cache + 
strict-transport-security: + - max-age=31536000; includeSubDomains + vary: + - Accept-Encoding + x-cache: + - CONFIG_NOCACHE + x-content-type-options: + - nosniff + x-request-time: + - '0.133' + status: + code: 200 + message: OK +- request: + body: '{"displayName": "test_display_name_test_mark", "description": "test_description_test_mark", + "tags": {"test_tag": "test_mark"}}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '207' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: PATCH + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/modify + response: + body: + string: '{"runNumber": 1687749782, "rootRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "createdUtc": "2023-06-26T03:23:02.3017884+00:00", "createdBy": {"userObjectId": + "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "userPuId": "100320007E5EB49B", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Zhen + Ruan", "upn": null}, "userId": "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "token": + null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": + 103, "statusRevision": 2, "runUuid": "fab99b4a-07c2-4c77-8ee3-3d3a47102b4a", + "parentRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", "rootRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:41.1688602", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T09:21:32.8226253+00:00", "duration": + "00:00:41.1688602", "cancelationReason": null, "currentAttemptId": 1, "runId": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "parentRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "experimentId": "ad8dde3a-73f1-49bc-ae71-c7c9b2dc1b9f", "status": "Completed", + "startTimeUtc": "2023-06-26T03:23:05.7657269+00:00", "endTimeUtc": "2023-06-26T03:23:46.9345871+00:00", + "scheduleId": null, "displayName": "test_display_name_test_mark", "name": + null, "dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "description": + "test_description_test_mark", "hidden": false, "runType": "azureml.promptflow.EvaluationRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": null, "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.flow_id": "QnARelevanceEvaluation", + "azureml.promptflow.flow_name": "QnA Relevance Evaluation", "azureml.promptflow.flow_type": + "Evaluation", "azureml.promptflow.source_flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "azureml.promptflow.baseline_variant_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "azureml.promptflow.variant_ids": "", "azureml.promptflow.bulk_test_id": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "azureml.promptflow.flow_experiment_id": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "azureml.promptflow.runtime_name": 
"demo-mir", "azureml.promptflow.runtime_version": + "20230614.v1", "azureml.promptflow.total_tokens": "26680"}, "parameters": + {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {"test_tag": "test_mark", "hod": "1"}, "settings": {}, "services": + {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": + null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": + null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + null, "queueingInfo": null, "inputs": null, "outputs": null}' + headers: + connection: + - keep-alive + content-length: + - '3617' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.073' + status: + code: 200 + message: OK +- request: + body: '{}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '2' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: PATCH + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/runs/4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74/modify + response: + body: + string: '{"runNumber": 1687749782, "rootRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "createdUtc": "2023-06-26T03:23:02.3017884+00:00", "createdBy": {"userObjectId": + "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "userPuId": "100320007E5EB49B", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Zhen + Ruan", "upn": null}, "userId": "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "token": + null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": + 103, "statusRevision": 2, "runUuid": "fab99b4a-07c2-4c77-8ee3-3d3a47102b4a", + "parentRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", "rootRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:41.1688602", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T09:21:37.3705009+00:00", "duration": + "00:00:41.1688602", "cancelationReason": null, "currentAttemptId": 1, "runId": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "parentRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "experimentId": "ad8dde3a-73f1-49bc-ae71-c7c9b2dc1b9f", "status": "Completed", + "startTimeUtc": "2023-06-26T03:23:05.7657269+00:00", "endTimeUtc": "2023-06-26T03:23:46.9345871+00:00", + "scheduleId": null, "displayName": "test_display_name_test_mark", "name": + null, "dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "description": + "test_description_test_mark", "hidden": false, "runType": "azureml.promptflow.EvaluationRun", + "runTypeV2": {"orchestrator": 
null, "traits": [], "attribution": null, "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.flow_id": "QnARelevanceEvaluation", + "azureml.promptflow.flow_name": "QnA Relevance Evaluation", "azureml.promptflow.flow_type": + "Evaluation", "azureml.promptflow.source_flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "azureml.promptflow.baseline_variant_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "azureml.promptflow.variant_ids": "", "azureml.promptflow.bulk_test_id": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "azureml.promptflow.flow_experiment_id": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20230614.v1", "azureml.promptflow.total_tokens": "26680"}, "parameters": + {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {"test_tag": "test_mark", "hod": "1"}, "settings": {}, "services": + {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": + null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": + null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + null, "queueingInfo": null, "inputs": null, "outputs": null}' + headers: + connection: + - keep-alive + content-length: + - '3617' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.075' + status: + code: 200 + message: OK +- request: + body: '{"runId": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "selectRunMetadata": + true, "selectRunDefinition": true, "selectJobSpecification": true}' + headers: + Accept: + - '*/*' + Accept-Encoding: + - gzip, deflate + Connection: + - keep-alive + Content-Length: + - '137' + Content-Type: + - application/json + User-Agent: + - python-requests/2.31.0 + method: POST + uri: https://eastus.api.azureml.ms/history/v1.0/subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/00000/providers/Microsoft.MachineLearningServices/workspaces/00000/rundata + response: + body: + string: '{"runMetadata": {"runNumber": 1687749782, "rootRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "createdUtc": "2023-06-26T03:23:02.3017884+00:00", "createdBy": {"userObjectId": + "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "userPuId": "100320007E5EB49B", "userIdp": + null, "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": "Zhen + Ruan", "upn": null}, "userId": "8471f65f-aa0e-4cde-b219-0ba7a1c148bf", "token": + null, "tokenExpiryTimeUtc": null, "error": null, "warnings": null, "revision": + 103, "statusRevision": 2, "runUuid": "fab99b4a-07c2-4c77-8ee3-3d3a47102b4a", + "parentRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", "rootRunUuid": "1b1db1ed-46ed-4106-bf87-52e3c423b395", + "lastStartTimeUtc": null, "currentComputeTime": null, "computeDuration": "00:00:41.1688602", + "effectiveStartTimeUtc": null, "lastModifiedBy": {"userObjectId": "4e60fbf3-0338-41a8-bed5-fc341be556f8", + "userPuId": null, "userIdp": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userAltSecId": null, "userIss": "https://sts.windows.net/00000000-0000-0000-0000-000000000000/", + "userTenantId": "00000000-0000-0000-0000-000000000000", "userName": 
"4cbd0e2e-aae4-4099-b4ba-94d3a4910587", + "upn": null}, "lastModifiedUtc": "2023-10-18T09:21:32.8226253+00:00", "duration": + "00:00:41.1688602", "cancelationReason": null, "currentAttemptId": 1, "runId": + "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "parentRunId": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "experimentId": "ad8dde3a-73f1-49bc-ae71-c7c9b2dc1b9f", "status": "Completed", + "startTimeUtc": "2023-06-26T03:23:05.7657269+00:00", "endTimeUtc": "2023-06-26T03:23:46.9345871+00:00", + "scheduleId": null, "displayName": "test_display_name_test_mark", "name": + null, "dataContainerId": "dcid.4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", "description": + "test_description_test_mark", "hidden": false, "runType": "azureml.promptflow.EvaluationRun", + "runTypeV2": {"orchestrator": null, "traits": [], "attribution": null, "computeType": + "MIR_v2"}, "properties": {"azureml.promptflow.flow_id": "QnARelevanceEvaluation", + "azureml.promptflow.flow_name": "QnA Relevance Evaluation", "azureml.promptflow.flow_type": + "Evaluation", "azureml.promptflow.source_flow_id": "42f7ee16-2243-475b-af21-e8fabd4abd15", + "azureml.promptflow.baseline_variant_run_id": "4cf2d5e9-c78f-4ab8-a3ee-57675f92fb74", + "azureml.promptflow.variant_ids": "", "azureml.promptflow.bulk_test_id": "9bafbd52-da0f-44ca-b40c-b0ce912f083c", + "azureml.promptflow.flow_experiment_id": "3e123da1-f9a5-4c91-9234-8d9ffbb39ff5", + "azureml.promptflow.runtime_name": "demo-mir", "azureml.promptflow.runtime_version": + "20230614.v1", "azureml.promptflow.total_tokens": "26680"}, "parameters": + {}, "actionUris": {}, "scriptName": null, "target": null, "uniqueChildRunComputeTargets": + [], "tags": {"test_tag": "test_mark", "hod": "1"}, "settings": {}, "services": + {}, "inputDatasets": [], "outputDatasets": [], "runDefinition": null, "jobSpecification": + null, "primaryMetricName": null, "createdFrom": null, "cancelUri": null, "completeUri": + null, "diagnosticsUri": null, "computeRequest": null, "compute": null, "retainForLifetimeOfWorkspace": + null, "queueingInfo": null, "inputs": null, "outputs": null}, "runDefinition": + {"Nodes": [{"Name": "relevance_score", "Tool": "compute_relevance_score", + "Comment": null, "Inputs": {"question": "${flow.question}", "context": "${flow.context}", + "answer": "${flow.answer}", "max_tokens": "256", "deployment_name": "gpt-35-turbo", + "temperature": "0.0"}, "Api": "chat", "Provider": "AzureOpenAI", "Connection": + "azure_open_ai_connection", "Module": "promptflow.tools.aoai", "Reduce": false}, + {"Name": "concat_scores", "Tool": "concat_results", "Comment": null, "Inputs": + {"relevance_score": "${relevance_score.output}"}, "Api": null, "Provider": + null, "Connection": null, "Module": null, "Reduce": false}, {"Name": "aggregate_variants_results", + "Tool": "aggregate_variants_results", "Comment": null, "Inputs": {"results": + "${concat_scores.output}", "line_number": "${flow.line_number}", "variant_id": + "${flow.variant_id}"}, "Api": null, "Provider": null, "Connection": null, + "Module": null, "Reduce": true}], "Tools": [{"Name": "compute_relevance_score", + "Type": "llm", "Inputs": {"context": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "question": {"Name": null, "Type": ["string"], + "Default": null, "Description": null, "Enum": null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}, "answer": {"Name": null, "Type": ["string"], "Default": + null, "Description": null, "Enum": 
null, "enabled_by": null, "enabled_by_type": + null, "model_list": null}}, "Outputs": null, "Description": "This is a llm + tool", "connection_type": null, "Module": null, "class_name": null, "Source": + "compute_relevance_score.jinja2", "LkgCode": null, "Code": "System:\nYou are + an AI assistant. You will be given the definition of an evaluation metric + for assessing the quality of an answer in a question-answering task. Your + job is to compute an accurate evaluation score using the provided evaluation + metric.\n\nUser:\nRelevance measures how well the answer addresses the main + aspects of the question, based on the context. Consider whether all and only + the important aspects are contained in the answer when evaluating relevance. + Given the context and question, score the relevance of the answer between + one to five stars using the following rating scale:\nOne star: the answer + completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "Function": null, "action_type": null, "provider_config": + null, "function_config": null, "is_builtin": false, "package": null, "package_version": + null}, {"Name": "concat_results", "Type": "python", "Inputs": {"relevance_score": + {"Name": null, "Type": ["string"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "concat_results.py", "LkgCode": null, "Code": "from promptflow + import tool\nimport numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: + str):\n\n load_list = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "Function": + "concat_results", "action_type": null, "provider_config": null, "function_config": + null, "is_builtin": false, "package": null, "package_version": null}, {"Name": + "aggregate_variants_results", "Type": "python", "Inputs": {"variant_id": {"Name": + null, "Type": ["object"], "Default": null, "Description": null, "Enum": null, + "enabled_by": null, "enabled_by_type": null, "model_list": null}, "line_number": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}, "results": + {"Name": null, "Type": ["object"], "Default": null, "Description": null, "Enum": + null, "enabled_by": null, "enabled_by_type": null, "model_list": null}}, "Outputs": + null, "Description": null, "connection_type": null, "Module": null, "class_name": + null, "Source": "aggregate_variants_results.py", "LkgCode": null, "Code": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = 
name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n", "Function": "aggregate_variants_results", "action_type": + null, "provider_config": null, "function_config": null, "is_builtin": false, + "package": null, "package_version": null}], "Codes": {"compute_relevance_score.jinja2": + "System:\nYou are an AI assistant. You will be given the definition of an + evaluation metric for assessing the quality of an answer in a question-answering + task. Your job is to compute an accurate evaluation score using the provided + evaluation metric.\n\nUser:\nRelevance measures how well the answer addresses + the main aspects of the question, based on the context. Consider whether all + and only the important aspects are contained in the answer when evaluating + relevance. Given the context and question, score the relevance of the answer + between one to five stars using the following rating scale:\nOne star: the + answer completely lacks relevance\nTwo stars: the answer mostly lacks relevance\nThree + stars: the answer is partially relevant\nFour stars: the answer is mostly + relevant\nFive stars: the answer has perfect relevance\n\nThis rating value + should always be an integer between 1 and 5. So the rating produced should + be 1 or 2 or 3 or 4 or 5.\n\ncontext: Marie Curie was a Polish-born physicist + and chemist who pioneered research on radioactivity and was the first woman + to win a Nobel Prize.\nquestion: What field did Marie Curie excel in?\nanswer: + Marie Curie was a renowned painter who focused mainly on impressionist styles + and techniques.\nstars: 1\n\ncontext: The Beatles were an English rock band + formed in Liverpool in 1960, and they are widely regarded as the most influential + music band in history.\nquestion: Where were The Beatles formed?\nanswer: + The band The Beatles began their journey in London, England, and they changed + the history of music.\nstars: 2\n\ncontext: The recent Mars rover, Perseverance, + was launched in 2020 with the main goal of searching for signs of ancient + life on Mars. The rover also carries an experiment called MOXIE, which aims + to generate oxygen from the Martian atmosphere.\nquestion: What are the main + goals of Perseverance Mars rover mission?\nanswer: The Perseverance Mars rover + mission focuses on searching for signs of ancient life on Mars.\nstars: 3\n\ncontext: + The Mediterranean diet is a commonly recommended dietary plan that emphasizes + fruits, vegetables, whole grains, legumes, lean proteins, and healthy fats. + Studies have shown that it offers numerous health benefits, including a reduced + risk of heart disease and improved cognitive health.\nquestion: What are the + main components of the Mediterranean diet?\nanswer: The Mediterranean diet + primarily consists of fruits, vegetables, whole grains, and legumes.\nstars: + 4\n\ncontext: The Queen''s Royal Castle is a well-known tourist attraction + in the United Kingdom. It spans over 500 acres and contains extensive gardens + and parks. 
The castle was built in the 15th century and has been home to generations + of royalty.\nquestion: What are the main attractions of the Queen''s Royal + Castle?\nanswer: The main attractions of the Queen''s Royal Castle are its + expansive 500-acre grounds, extensive gardens, parks, and the historical castle + itself, which dates back to the 15th century and has housed generations of + royalty.\nstars: 5\n\ncontext: {{context}}\nquestion: {{question}}\nanswer: + {{answer}}\nstars:", "concat_results.py": "from promptflow import tool\nimport + numpy as np\nimport re\n\n\n@tool\ndef concat_results(relevance_score: str):\n\n load_list + = [{''name'': ''gpt_relevance'', ''score'': relevance_score}]\n score_list + = []\n errors = []\n for item in load_list:\n try:\n score + = item[\"score\"]\n match = re.search(r''\\d'', score)\n if + match:\n score = match.group()\n score = float(score)\n except + Exception as e:\n score = np.nan\n errors.append({\"name\": + item[\"name\"], \"msg\": str(e), \"data\": item[\"score\"]})\n score_list.append({\"name\": + item[\"name\"], \"score\": score})\n\n variant_level_result = {}\n for + item in score_list:\n item_name = str(item[\"name\"])\n variant_level_result[item_name] + = item[\"score\"]\n variant_level_result[item_name + ''_pass_rate''] + = 1 if item[\"score\"] > 3 else 0\n return variant_level_result\n", "aggregate_variants_results.py": + "from typing import List\nfrom promptflow import tool, log_metric\nimport + numpy as np\n\n\n@tool\ndef aggregate_variants_results(variant_id: List[int], + line_number: List[int], results: List[dict]):\n aggregate_results = {}\n for + index in range(len(line_number)):\n result = results[index]\n variant + = variant_id[index]\n if variant not in aggregate_results.keys():\n aggregate_results[variant] + = {}\n item_result = aggregate_results[variant]\n for name, + value in result.items():\n if name not in item_result.keys():\n item_result[name] + = []\n try:\n float_val = float(value)\n except + Exception:\n float_val = np.nan\n item_result[name].append(float_val)\n\n for + name, value in aggregate_results.items():\n variant_id = name\n aggr_item + = aggregate_results[name]\n for name, value in aggr_item.items():\n metric_name + = name\n aggr_item[name] = np.nanmean(value)\n if ''pass_rate'' + in metric_name:\n metric_name = metric_name + \"(%)\"\n aggr_item[name] + = aggr_item[name] * 100.0\n aggr_item[name] = round(aggr_item[name], + 2)\n log_metric(metric_name, aggr_item[name], variant_id=variant_id)\n\n return + aggregate_results\n"}, "Inputs": {"question": {"Name": null, "Type": "string", + "Default": null, "Description": null, "is_chat_input": false}, "context": + {"Name": null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false}, "answer": {"Name": null, "Type": "string", "Default": null, "Description": + null, "is_chat_input": false}, "line_number": {"Name": null, "Type": "int", + "Default": null, "Description": null, "is_chat_input": false}, "variant_id": + {"Name": null, "Type": "string", "Default": null, "Description": null, "is_chat_input": + false}}, "Outputs": {"gpt_relevance": {"Name": null, "Type": "object", "Description": + null, "Reference": "${concat_scores.output.gpt_relevance}", "evaluation_only": + false, "is_chat_output": false}}}, "jobSpecification": null, "systemSettings": + null}' + headers: + connection: + - keep-alive + content-length: + - '21285' + content-type: + - application/json; charset=utf-8 + strict-transport-security: + - max-age=15724800; 
includeSubDomains; preload + transfer-encoding: + - chunked + vary: + - Accept-Encoding + x-content-type-options: + - nosniff + x-request-time: + - '0.051' + status: + code: 200 + message: OK +version: 1