# Imported from GitHub PR #4: "Merge pull request #3 from AbeOmor/main"
# CI workflow: run a Promptflow batch run and its evaluation on Azure ML,
# then register the flow as a model only if the evaluation meets the quality bar.
name: Test and Evaluate Prompts with Promptflow

on:
  workflow_dispatch:
  push:
    branches: [main]

env:
  GROUP: ${{ secrets.GROUP }}
  WORKSPACE: ${{ secrets.WORKSPACE }}
  SUBSCRIPTION_ID: ${{ secrets.SUBSCRIPTION_ID }}
  # Fallback run names; overwritten at runtime via $GITHUB_ENV in the steps below.
  RUN_NAME: web_classification_variant_1_20230816_215600_605116
  EVAL_RUN_NAME: classification_accuracy_eval_default_20230821_111809_077086

jobs:
  # Job 1: execute the flow and its evaluation run, capture the metrics file.
  login-and-run-and-evalpf:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repo
        uses: actions/checkout@v4
      - name: Install az ml extension
        run: az extension add -n ml -y
      - name: Azure login
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11.4'
      - name: List current directory
        run: ls
      - name: Install promptflow
        run: pip install -r promptflow/web-classification/requirements.txt
      - name: Run promptflow
        # Was ${{ env.SUBSCRIPTION }} (undefined — expanded empty); env defines SUBSCRIPTION_ID.
        run: |
          pfazure run create -f promptflow/web-classification/run.yml --subscription ${{ env.SUBSCRIPTION_ID }} -g ${{ env.GROUP }} -w ${{ env.WORKSPACE }} --stream > promptflow/llmops-helper/run_info.txt
          cat promptflow/llmops-helper/run_info.txt
      - name: Set run name
        # parse_run_output.py takes a bare filename — presumably resolved
        # relative to the script's own directory; verify against the helper.
        run: |
          echo "RUN_NAME=$(python promptflow/llmops-helper/parse_run_output.py run_info.txt)" >> "$GITHUB_ENV"
      - name: Show the current run name
        run: echo "Run name is:" ${{ env.RUN_NAME }}
      - name: Show promptflow results
        run: pfazure run show-details --name ${{ env.RUN_NAME }} --subscription ${{ env.SUBSCRIPTION_ID }} -g ${{ env.GROUP }} -w ${{ env.WORKSPACE }}
      - name: Run promptflow evaluations
        run: pfazure run create -f promptflow/web-classification/run_evaluation.yml --run ${{ env.RUN_NAME }} --subscription ${{ env.SUBSCRIPTION_ID }} -g ${{ env.GROUP }} -w ${{ env.WORKSPACE }} --stream > promptflow/llmops-helper/eval_info.txt
      - name: Get eval run name
        run: echo "EVAL_RUN_NAME=$(python promptflow/llmops-helper/parse_run_output.py eval_info.txt)" >> "$GITHUB_ENV"
      - name: Show promptflow details
        run: pfazure run show-details --name ${{ env.EVAL_RUN_NAME }} --subscription ${{ env.SUBSCRIPTION_ID }} -g ${{ env.GROUP }} -w ${{ env.WORKSPACE }}
      - name: Show promptflow metrics
        run: pfazure run show-metrics --name ${{ env.EVAL_RUN_NAME }} --subscription ${{ env.SUBSCRIPTION_ID }} -g ${{ env.GROUP }} -w ${{ env.WORKSPACE }} > promptflow/llmops-helper/eval_result.json
      - name: Upload eval results
        # The next job runs on a fresh runner with a fresh checkout, so the
        # metrics file must be handed over as an artifact or it won't exist there.
        uses: actions/upload-artifact@v4
        with:
          name: eval-result
          path: promptflow/llmops-helper/eval_result.json

  # Job 2: gate on the evaluation metrics and register the model if it passes.
  assert-and-register-model:
    needs: login-and-run-and-evalpf
    runs-on: ubuntu-latest
    steps:
      - name: Check out repo
        uses: actions/checkout@v4
      - name: Install az ml extension
        run: az extension add -n ml -y
      - name: Azure login
        uses: azure/login@v1
        with:
          creds: ${{ secrets.AZURE_CREDENTIALS }}
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.11.4'
      - name: Set default subscription
        # Was ${{ env.SUBSCRIPTION }} (undefined); env defines SUBSCRIPTION_ID.
        run: |
          az account set -s ${{ env.SUBSCRIPTION_ID }}
      - name: Download eval results
        # Restores eval_result.json produced by the previous job into the
        # directory where assert.py expects to find it.
        uses: actions/download-artifact@v4
        with:
          name: eval-result
          path: promptflow/llmops-helper
      - name: List current directory
        run: ls
      - name: Install promptflow
        run: pip install -r promptflow/web-classification/requirements.txt
      - name: Get assert eval results
        id: jobMetricAssert
        # Was `assert.py result.json`, but the metrics step writes
        # eval_result.json — the bare-name mismatch meant the assert could
        # never find the file. `::set-output` is deprecated; use $GITHUB_OUTPUT.
        run: |
          export ASSERT=$(python promptflow/llmops-helper/assert.py eval_result.json 0.6) # NOTE <file>.json is the file name and decimal is the threshold for the assertion
          echo "::debug::Assert has returned the following value: $ASSERT"
          # assert.py will return True or False, but bash expects lowercase.
          if ${ASSERT,,} ; then
            echo "::debug::Prompt flow run met the quality bar and can be deployed."
            echo "result=true" >> "$GITHUB_OUTPUT"
          else
            echo "::warning::Prompt flow run didn't meet quality bar."
            echo "result=false" >> "$GITHUB_OUTPUT"
          fi
      - name: Register promptflow model
        if: ${{ steps.jobMetricAssert.outputs.result == 'true' }}
        run: az ml model create --file promptflow/deployment/model.yaml -g ${{ env.GROUP }} -w ${{ env.WORKSPACE }}