TorchBench V2 nightly #310
Workflow file for this run

name: TorchBench V2 nightly
on:
  workflow_dispatch:
  schedule:
    - cron: '0 14 * * *' # run at 2 PM UTC
jobs:
  run-benchmark:
    environment: docker-s3-upload
    env:
      TORCHBENCH_VER: "v2"
      CONFIG_VER: "v2"
      PYTHON_VER: "3.10"
      CUDA_VER: "12.1"
      MAGMA_VERSION: "magma-cuda121"
      CONDA_ENV_NAME: "torchbench-v2-nightly-ci"
      OUTPUT_DIR: ".torchbench/v2-nightly-ci"
      BISECTION_ROOT: ".torchbench/v2-bisection-ci"
      CUDA_VERSION: "cu121"
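      # Note: CUDA_VERSION ("cu121") selects the PyTorch nightly wheel index
      # below; MAGMA_VERSION ("magma-cuda121") must match the same CUDA release.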
      SCRIBE_GRAPHQL_ACCESS_TOKEN: ${{ secrets.SCRIBE_GRAPHQL_ACCESS_TOKEN }}
      AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
      AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
      IS_GHA: 1
      AWS_DEFAULT_REGION: us-east-1
      BUILD_ENVIRONMENT: benchmark-nightly
    if: ${{ github.repository_owner == 'pytorch' }}
    runs-on: [self-hosted, bm-runner]
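    # Each "run" step below starts a fresh shell, so the conda env is
    # re-activated at the top of every step that needs it.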
    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: v2.0
      - name: Create conda env
        run: |
          conda create -y -q --name "${CONDA_ENV_NAME}" python=${{ env.PYTHON_VER }}
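      # The env is created fresh for each run and removed again in the final
      # "Destroy conda env" step.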
      - name: Install PyTorch nightly
        run: |
          . activate "${CONDA_ENV_NAME}"
          . /data/nvme/bin/setup_instance.sh
          # Install dependencies
          pip install requests bs4 argparse gitpython boto3 regex
          # Check if nightly builds are available
          NIGHTLIES=$(python torchbenchmark/util/torch_nightly.py --packages torch)
          # If the check failed, the script generates an empty result
          if [ -z "${NIGHTLIES}" ]; then
            echo "Torch nightly build failed. Canceling the workflow."
            exit 1
          fi
          # Install magma
          conda install -y -c pytorch "${MAGMA_VERSION}"
          # Install PyTorch nightly from pip
          pip install --no-cache-dir --pre torch torchvision torchaudio --index-url \
            https://download.pytorch.org/whl/nightly/${CUDA_VERSION}
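          # Optional sanity check of the freshly installed wheel, e.g.:
          #   python -c "import torch; print(torch.__version__, torch.version.cuda)"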
      - name: Install other TorchBench dependencies
        run: |
          . activate "${CONDA_ENV_NAME}"
          . /data/nvme/bin/setup_instance.sh
          conda install -y git-lfs
          python install.py
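      # install.py above sets up the benchmark models themselves (weights and
      # per-model Python dependencies).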
      - name: Run benchmark
        run: |
          . activate "${CONDA_ENV_NAME}"
          . /data/nvme/bin/setup_instance.sh
          WORKFLOW_HOME="${HOME}/${{ env.OUTPUT_DIR }}/gh${GITHUB_RUN_ID}"
          bash ./.github/scripts/run.sh "${WORKFLOW_HOME}"
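      # run.sh is expected to write timestamped *.json result files into
      # ${WORKFLOW_HOME}; the "Copy artifact" step below picks the newest one.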
      - name: Generate the bisection config
        run: |
          set -x
          . activate "${CONDA_ENV_NAME}"
          WORKFLOW_HOME="${HOME}/${{ env.OUTPUT_DIR }}/gh${GITHUB_RUN_ID}"
          mkdir -p benchmark-output/
          # Update the PyTorch checkout on the self-hosted runner
          pushd "${HOME}/pytorch"
          git fetch origin
          popd
          pip install gitpython pyyaml dataclasses argparse
          # Compare against yesterday's result and report any perf signals
          python ./.github/scripts/generate-abtest-config.py \
            --pytorch-dir "${HOME}/pytorch" \
            --github-issue "${WORKFLOW_HOME}/gh-issue.md" \
            --benchmark-dir "${WORKFLOW_HOME}" \
            --out "${WORKFLOW_HOME}/bisection.yaml"
          # Include the results in the GitHub artifact
          if [ -f "${WORKFLOW_HOME}/gh-issue.md" ]; then
            cp "${WORKFLOW_HOME}/bisection.yaml" ./benchmark-output/
            cp "${WORKFLOW_HOME}/gh-issue.md" ./benchmark-output/
            # Set up the bisection environment
            BISECTION_HOME="${HOME}/${{ env.BISECTION_ROOT }}/bisection-gh${GITHUB_RUN_ID}"
            mkdir -p "${BISECTION_HOME}"
            mv ./benchmark-output/gh-issue.md "${BISECTION_HOME}/gh-issue.md"
            cp ./benchmark-output/bisection.yaml "${BISECTION_HOME}/config.yaml"
          fi
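      # The dispatch below assumes generate-abtest-config.py exports
      # TORCHBENCH_PERF_SIGNAL (e.g. via $GITHUB_ENV) when a regression is
      # detected; otherwise the step is skipped.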
      - name: Dispatch the bisection workflow
        if: env.TORCHBENCH_PERF_SIGNAL
        run: |
          # Get the workflow ID from
          # https://api.github.com/repos/pytorch/benchmark/actions/workflows
          curl -u xuzhao9:${{ secrets.TORCHBENCH_ACCESS_TOKEN }} \
            -X POST \
            -H "Accept: application/vnd.github.v3+json" \
            https://api.github.com/repos/pytorch/benchmark/actions/workflows/16176850/dispatches \
            -d '{"ref": "main", "inputs": {"issue_name": "bisection-gh'"${GITHUB_RUN_ID}"'" } }'
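          # A roughly equivalent call with the GitHub CLI (the bisection
          # workflow's file name is an assumption here) would be:
          #   gh workflow run <bisection-workflow>.yml --repo pytorch/benchmark \
          #     --ref main -f issue_name="bisection-gh${GITHUB_RUN_ID}"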
      - name: Copy artifact and upload to scribe
        run: |
          . activate "${CONDA_ENV_NAME}"
          TODAY=$(date "+%Y%m%d%H%M%S")
          LATEST_RESULT=$(find ${HOME}/${{ env.OUTPUT_DIR }}/gh${GITHUB_RUN_ID} -name "*.json" | sort -r | head -1)
          echo "Benchmark result file: ${LATEST_RESULT}"
          mkdir -p benchmark-output/
          cp "${LATEST_RESULT}" ./benchmark-output/benchmark-result-${CONFIG_VER}-${TODAY}.json
          # Locate the score configuration
          CONFIG_DIR=torchbenchmark/score/configs/${CONFIG_VER}
          CONFIG_ENV=${CONFIG_DIR}/config-${CONFIG_VER}.env
          # Load environment variables; "set -a" auto-exports everything sourced
          # from the .env file so the scripts below can read it
          set -a; source "${CONFIG_ENV}"; set +a
          SCORE_FILE="./benchmark-result-${CONFIG_VER}-score-${TODAY}.json"
          # Generate the score file
          python compute_score.py --score_version "${CONFIG_VER}" --benchmark_data_file "${LATEST_RESULT}" --output-json "${SCORE_FILE}"
          # Upload the result to Scribe
          python scripts/upload_scribe_${CONFIG_VER}.py --pytest_bench_json "${LATEST_RESULT}" --torchbench_score_file "${SCORE_FILE}"
      - name: Upload artifact
        uses: actions/upload-artifact@v3
        with:
          name: Benchmark result
          path: benchmark-output/
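      # The artifact contains the nightly result JSON, plus bisection.yaml when
      # a perf signal fired (gh-issue.md is moved to the bisection workspace
      # above, so it is not uploaded here).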
      - name: Destroy conda env
        run: |
          conda env remove --name "${CONDA_ENV_NAME}"