Epic - Recovery & Refreshing #594
Workflow file for this run

name: Workspace
on:
  pull_request:
    paths-ignore:
      - README.md
  push:
    branches:
      - main
    paths-ignore:
      - README.md
    tags:
      - v*
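# Notes on the environment below: CARGO_INCREMENTAL=0 disables incremental compilation, the usual
# CI setting for keeping the cached target/ directory small, and RUSTFLAGS="-Dwarnings" promotes
# every compiler warning to a hard error in all jobs.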
env:
  CARGO_INCREMENTAL: 0
  RUSTFLAGS: "-Dwarnings"
jobs:
  check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: stable
          components: clippy, rustfmt
      - uses: actions/cache@v3
        continue-on-error: false
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - run: cargo clippy --all --all-features -- -D warnings
      - run: cargo fmt --all -- --check
      # TODO: Re-enable after bumping MSRV to 1.70.0+
      # - uses: bnjbvr/cargo-machete@main
      # - name: Install cargo-sort
      #   uses: baptiste0928/cargo-install@v1
      #   with:
      #     crate: cargo-sort
      # - run: cargo sort --check
  test:
    runs-on: ubuntu-latest
    needs: [ check ]
    strategy:
      matrix:
        include:
          - target: x86_64-unknown-linux-gnu
            rust: 1.67 # MSRV, `cargo msrv`
          - target: x86_64-unknown-linux-gnu
            rust: stable
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ matrix.rust }}
          targets: ${{ matrix.target }}
      - uses: actions/cache@v3
        continue-on-error: false
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - run: ${{ matrix.deps }}
      - run: cargo check --all-features
      # We're disabling default features here because pyo3/extension-module causes linking issues when testing
      # See https://pyo3.rs/v0.13.2/faq.html#i-cant-run-cargo-test-im-having-linker-issues-like-symbol-not-found-or-undefined-reference-to-_pyexc_systemerror
      - run: cargo test --release --no-default-features
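      # For context, a minimal sketch (hypothetical feature names) of the kind of Cargo.toml
      # layout that triggers those linker errors when default features are enabled:
      #   [features]
      #   default = ["extension-module"]
      #   extension-module = ["pyo3/extension-module"]
      # With pyo3's extension-module feature on, the crate is built without linking libpython,
      # so `cargo test` binaries miss Python symbols such as _PyExc_SystemError; disabling
      # default features at test time avoids that.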
  wasm-test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        rust:
          - 1.67 # MSRV
          - stable
        target:
          - wasm32-unknown-unknown
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ matrix.rust }}
          targets: ${{ matrix.target }}
      - run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
      - run: wasm-pack test --node
        working-directory: ferveo-wasm
  yarn-test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        rust:
          - stable
        target:
          - wasm32-unknown-unknown
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ matrix.rust }}
          targets: ${{ matrix.target }}
      - run: curl https://rustwasm.github.io/wasm-pack/installer/init.sh -sSf | sh
      - run: wasm-pack build --target nodejs
        working-directory: ferveo-wasm
      - uses: borales/[email protected]
        with:
          cmd: --cwd ferveo-wasm/examples/node install
      - uses: borales/[email protected]
        with:
          cmd: --cwd ferveo-wasm/examples/node build
      - uses: borales/[email protected]
        with:
          cmd: --cwd ferveo-wasm/examples/node test
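      # Rough local equivalent outside CI (sketch): build the package with
      #   wasm-pack build ferveo-wasm --target nodejs
      # then, from ferveo-wasm/examples/node, run `yarn install`, `yarn build`, and `yarn test`.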
  python-test:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        rust:
          - stable
        target:
          - wasm32-unknown-unknown
        python:
          - "3.11"
          - "3.12"
    steps:
      - uses: actions/checkout@v3
      - uses: actions/setup-python@v4
        with:
          python-version: ${{ matrix.python }}
      - name: Install Rust toolchain
        uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: ${{ matrix.rust }}
          targets: ${{ matrix.target }}
      - name: Install Ferveo Python package
        run: pip install -e .
        working-directory: ferveo-python
      - name: Run Python Ferveo examples (server_api_precomputed)
        run: python examples/server_api_precomputed.py
        working-directory: ferveo-python
      - name: Run Python Ferveo examples (server_api_simple)
        run: python examples/server_api_simple.py
        working-directory: ferveo-python
      - name: Install pip dependencies
        run: pip install pytest mypy ruff
      - name: Run pytest
        run: pytest
        working-directory: ferveo-python
      - name: Run mypy.stubtest
        run: python -m mypy.stubtest ferveo
        working-directory: ferveo-python
      - name: Run ruff
        run: ruff check ferveo
        working-directory: ferveo-python
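      # Note: mypy.stubtest imports the built ferveo extension module and cross-checks it against
      # the shipped .pyi stubs, so that step fails if the Rust bindings and the type stubs drift
      # apart; ruff only lints the Python sources.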
  codecov:
    runs-on: ubuntu-latest
    needs: [ test ]
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: stable
          targets: x86_64-unknown-linux-gnu
      - name: Install cargo-llvm-cov
        uses: taiki-e/install-action@cargo-llvm-cov
      # Only checking the coverage of the main library;
      # the bindings are covered by their own languages' tests.
      - name: Generate code coverage
        run: cargo llvm-cov --workspace --lcov --output-path lcov.info --no-default-features
      - name: Upload coverage to Codecov
        uses: codecov/codecov-action@v3
        with:
          files: lcov.info
          fail_ci_if_error: true
  compile-benchmarks:
    # Temporarily replaces the "benchmark" job. TODO: Remove this job once the "benchmark" job is fixed.
    runs-on: ubuntu-latest
    needs: [ test ]
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: stable
          targets: x86_64-unknown-linux-gnu
      - name: Compile benchmarks
        run: cargo bench --no-run --no-default-features
  benchmark:
    # TODO: This job is failing with the error:
    # "No benchmark result was found in /home/runner/work/ferveo/ferveo/benchmarks/perf/tpke/output.txt. Benchmark output was '"
    # For example, see: https://github.com/nucypher/ferveo/actions/runs/4790448140/jobs/8519706701
    # TODO: Fix or remove this job from the workflow.
    if: ${{ false }}
    runs-on: ubuntu-latest
    needs: [ test ]
    strategy:
      matrix:
        component:
          - tpke
    steps:
      - uses: actions/checkout@v3
      - uses: dtolnay/rust-toolchain@v1
        with:
          toolchain: nightly
          targets: x86_64-unknown-linux-gnu
      - uses: actions/cache@v3
        continue-on-error: false
        with:
          # Not caching ~/.cargo/bin/ because it breaks `cargo install critcmp` in `boa-dev/criterion-compare-action@v3`
          path: |
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
          restore-keys: ${{ runner.os }}-cargo-
      - name: Run benchmark for base branch comparison
        uses: boa-dev/criterion-compare-action@v3
        # Only PRs to main
        if: github.event_name == 'pull_request' && github.ref == 'refs/heads/main' && github.repository == 'nucypher/ferveo'
        with:
          cwd: ${{ matrix.component }}
          branchName: ${{ github.base_ref }}
          features: "test-common"
      # The next steps have been adapted from https://raw.githubusercontent.com/unicode-org/icu4x/main/.github/workflows/build-test.yml
      # (Benchmarking & dashboards job > Run benchmark).
      - name: Install cargo-criterion
        if: github.event_name == 'push' && github.ref == 'refs/heads/main' && github.repository == 'nucypher/ferveo'
        run: cargo install cargo-criterion
      - name: Run benchmark for dashboard
        # Only merges to main (implies the PR is finished and approved by this point).
        if: github.event_name == 'push' && github.ref == 'refs/heads/main' && github.repository == 'nucypher/ferveo'
        run: |
          pushd $PWD && cd ${{ matrix.component }};
          export REL_OUTPUT_PATH="`dirs +1`/benchmarks/perf/${{ matrix.component }}";
          eval OUTPUT_PATH=$REL_OUTPUT_PATH;
          mkdir -p $OUTPUT_PATH;
          # Criterion writes the actual bench results to stderr; `2>&1 | tee -a output.txt` redirects
          # stderr to stdout and pipes it to tee, which displays it in the terminal and appends it to output.txt.
          cargo criterion --output-format bencher 2>&1 | tee -a $OUTPUT_PATH/output.txt;
          echo "Saved output to $OUTPUT_PATH/output.txt";
          popd
      # In the following step(s), which convert benchmark output into dashboards, the branch named in `gh-pages-branch` needs to exist.
      # If it doesn't already exist, it should be created by someone with push permissions, like so:
      #   # Create a local branch
      #   $ git checkout --orphan <newbranch>
      #   $ git commit --allow-empty -m "root commit"
      #   # Push it to create a remote branch
      #   $ git push origin <newbranch>:<newbranch>
      # Benchmarking & dashboards job > (PR merge to main only) Convert benchmark output into dashboard HTML in a commit of a branch of the local repo.
      - name: Store benchmark result & create dashboard (merge to main only)
        # Only merges to main (implies the PR is finished and approved by this point).
        if: github.event_name == 'push' && github.ref == 'refs/heads/main' && github.repository == 'nucypher/ferveo'
        uses: benchmark-action/github-action-benchmark@v1
        with:
          name: Rust Benchmark
          tool: 'cargo'
          output-file-path: ./benchmarks/perf/${{ matrix.component }}/output.txt
          benchmark-data-dir-path: ./benchmarks/perf/${{ matrix.component }}
          # Show an alert with a commit comment on detecting a possible performance regression.
          alert-threshold: '200%' # If for nothing else, enabling alerts with meaningful thresholds requires this job to run per-component.
          fail-on-alert: true
          gh-pages-branch: merged-bench-data # Requires one-time-only creation of this branch on the remote repo.
          auto-push: true # Use the branch at `gh-pages-branch` to store historical benchmark data.
          github-token: ${{ secrets.GITHUB_TOKEN }}
          comment-on-alert: true
          alert-comment-cc-users: '@piotr-roslaniec'