
Commit 865d2c8

feat: performance tests
1 parent 9d271bb commit 865d2c8

File tree

7 files changed: +192 -11 lines changed

.github/workflows/create-perf-benchmarks.yaml

Lines changed: 57 additions & 0 deletions
@@ -0,0 +1,57 @@
name: Create Performance Benchmarks
description: |
  This workflow runs the performance benchmark tests on the specified runner tags
  and creates a baseline for future comparisons. It uses pytest-benchmark to run
  the tests and save the results.
  It can also create a pull request with the results if specified.

on:
  workflow_dispatch:
    inputs:
      create_pr:
        description: "Create PR with results (true/false)"
        required: false
        default: "false"
  pull_request:
    paths:
      - .github/workflows/create-perf-benchmarks.yaml

jobs:
  benchmark:
    name: Run Benchmarks for ${{ matrix.os }}-${{ matrix.arch }}
    # runs-on assumed to match the runner selection used in nightly-test.yaml
    runs-on: ${{ matrix.arch == 'arm64' && 'self-hosted-linux-arm64-noble-large' || 'self-hosted-linux-amd64-noble-large' }}
    strategy:
      matrix:
        os: ["ubuntu:22.04", "ubuntu:24.04"]
        arch: ["amd64", "arm64"]
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
      - name: Setup Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.12"
      - name: Run benchmarks
        run: |
          tox -e create_benchmark_baselines -- --baseline-directory=tests/integration/tests/performance/benchmarks --baseline-name=${{ matrix.os }}-${{ matrix.arch }}

  # Assumed job wrapper (name, condition, and runner) for the PR-creation steps.
  create-pr:
    needs: benchmark
    if: ${{ github.event.inputs.create_pr == 'true' }}
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4

      - name: Download artifact
        uses: actions/download-artifact@v4
        with:
          name: benchmark-results
          path: tests/integration/

      - name: Commit and create PR
        uses: peter-evans/create-pull-request@v6
        with:
          commit-message: "Add benchmark baseline results"
          title: "Benchmark Baselines"
          body: "Automated benchmark results from manual run"
          branch: benchmark/baseline-results
          labels: |
            benchmark
            automation
          token: ${{ secrets.GITHUB_TOKEN }}
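
The workflow_dispatch trigger above can be invoked from the GitHub CLI to refresh the baseline manually. The commands below are a sketch; they assume the workflow file name and the create_pr input shown in this diff.

gh workflow run create-perf-benchmarks.yaml -f create_pr=true
gh run list --workflow=create-perf-benchmarks.yaml --limit 1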

.github/workflows/nightly-test.yaml

Lines changed: 23 additions & 10 deletions
@@ -13,20 +13,33 @@ permissions:
   contents: read
 
 jobs:
-  test-integration:
-    name: Integration
+  test-performance:
+    name: Run performance tests for ${{ matrix.os }}-${{ matrix.arch }}
     strategy:
       matrix:
         os: ["ubuntu:22.04", "ubuntu:24.04"]
         arch: ["amd64", "arm64"]
-        channel: ["latest/edge"]
-      fail-fast: false # TODO: remove once we no longer have flaky tests.
-    uses: ./.github/workflows/e2e-tests.yaml
-    with:
-      arch: ${{ matrix.arch }}
-      os: ${{ matrix.os }}
-      channel: ${{ matrix.channel }}
-      test-tags: 'up_to_nightly conformance_tests'
+    runs-on: ${{ matrix.arch == 'arm64' && 'self-hosted-linux-arm64-noble-large' || 'self-hosted-linux-amd64-noble-large' }}
+    steps:
+      - name: Checkout repo
+        uses: actions/checkout@v4
+      - name: Setup Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.12"
+      - name: Run benchmarks
+        env:
+          TEST_SUBSTRATE: lxd
+          TEST_LXD_IMAGE: ${{ matrix.os }}
+        run: |
+          tox -e create_benchmark_baselines -- --baseline-directory=benchmark-results --baseline-name=${{ matrix.os }}-${{ matrix.arch }}
+
+      - name: Upload benchmark results
+        uses: actions/upload-artifact@v4
+        with:
+          name: benchmark-results
+          path: benchmark-results/
 
   Trivy:
     permissions:
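
Each matrix job uploads its results as the benchmark-results artifact, so a finished nightly run can be pulled down for local inspection. A sketch, where <run-id> is a placeholder for the workflow run to download:

gh run download <run-id> -n benchmark-results -D benchmark-results/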

tests/integration/tests/conftest.py

Lines changed: 1 addition & 1 deletion
@@ -14,7 +14,7 @@
 
 LOG = logging.getLogger(__name__)
 
-pytest_plugins = ("pytest_tagging",)
+pytest_plugins = ("pytest_tagging", "pytest_benchmark")
 
 
 def pytest_itemcollected(item):
tests/integration/tests/performance/create_benchmark_baselines.py

Lines changed: 62 additions & 0 deletions
@@ -0,0 +1,62 @@
#!/usr/bin/env python3
import argparse
import os
import subprocess


def _prepare_baseline_directory(baseline_directory: str, baseline_name: str):
    """
    Prepare the baseline directory by removing existing baselines with the
    specified baseline name and creating the directory if it doesn't exist.
    """
    if os.path.exists(baseline_directory):
        for root, _, files in os.walk(baseline_directory):
            for file in files:
                file_path = os.path.join(root, file)
                if baseline_name in file:
                    os.unlink(file_path)
    os.makedirs(baseline_directory, exist_ok=True)

    return baseline_directory


def run_benchmarks(baseline_directory: str, baseline_name: str):
    print(
        f"Running benchmark tests and storing results as {baseline_directory}/{baseline_name}"
    )

    subprocess.run(
        [
            "tox",
            "-e",
            "integration",
            "--",
            "--tags",
            "performance",
            f"--benchmark-save={baseline_name}",
            f"--benchmark-storage={baseline_directory}",
        ],
        check=True,
    )


def main():
    parser = argparse.ArgumentParser(
        description="Create performance benchmarks using pytest-benchmark."
    )
    parser.add_argument(
        "--baseline-name", help="Name for saved benchmark results", required=True
    )
    parser.add_argument(
        "--baseline-directory",
        help="Directory for saved benchmark results",
        required=True,
    )
    args = parser.parse_args()
    _prepare_baseline_directory(args.baseline_directory, args.baseline_name)
    run_benchmarks(args.baseline_directory, args.baseline_name)
    print(f"All benchmarks saved in {args.baseline_directory}/{args.baseline_name}.")


if __name__ == "__main__":
    main()
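
A sketch of invoking the baseline script directly; the repository-relative path and the argument values are illustrative, taken from the paths used elsewhere in this diff.

python3 tests/integration/tests/performance/create_benchmark_baselines.py \
    --baseline-directory=tests/integration/tests/performance/benchmarks \
    --baseline-name=ubuntu:22.04-amd64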
Lines changed: 21 additions & 0 deletions
@@ -0,0 +1,21 @@
import pytest
from typing import List
from test_util import config, harness, tags, util

@pytest.mark.node_count(1)
@pytest.mark.no_setup()
@pytest.mark.tags(tags.PERFORMANCE)
def test_perf_clustering_bootstrap_cli(instances: List[harness.Instance], tmp_path: str, benchmark):
    node = instances[0]

    def setup():
        # TODO(ben): benchmark `teardown` function is implemented but not yet released
        # in 5.1.0. Once released, we can move this teardown logic in a separate function.
        # See https://github.com/ionelmc/pytest-benchmark/issues/270
        node.exec(["snap", "remove", "k8s", "--purge"])
        util.setup_k8s_snap(node, tmp_path)

    def run():
        node.exec(["k8s", "bootstrap"])

    benchmark.pedantic(run, setup=setup, rounds=config.PERF_DEFAULT_ROUNDS)
Lines changed: 20 additions & 0 deletions
@@ -0,0 +1,20 @@
import pytest
from typing import List
from test_util import config, harness, tags, util

@pytest.mark.node_count(1)
@pytest.mark.tags(tags.PULL_REQUEST, tags.PERFORMANCE)
def test_perf_status_single_node_cli(instances: List[harness.Instance], tmp_path: str, benchmark):
    node = instances[0]

    def run():
        node.exec(["k8s", "status"])

    benchmark.pedantic(run, rounds=20)

@pytest.mark.tags(tags.PULL_REQUEST, tags.PERFORMANCE)
def test_perf_short(benchmark):
    def run():
        print("short")

    benchmark.pedantic(run, rounds=20)
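
Once a baseline is committed, a regression check is a matter of pointing pytest-benchmark at the stored results. The call below is a sketch using pytest-benchmark's built-in comparison flags; the storage path and the 10% threshold are assumptions, not part of this commit.

tox -e integration -- --tags performance \
    --benchmark-storage=tests/performance/benchmarks \
    --benchmark-compare \
    --benchmark-compare-fail=mean:10%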

tests/integration/tox.ini

Lines changed: 8 additions & 0 deletions
@@ -44,7 +44,15 @@ commands =
     {posargs}
 commands_post =
     subunit2html "{toxinidir}/subunit.out" "{toxinidir}/subunit.html"
+passenv =
+    TEST_*
 
+[testenv:create_benchmark_baselines]
+description = Generate baselines for performance tests
+deps =
+    -r {toxinidir}/requirements-test.txt
+commands =
+    python3 {toxinidir}/tests/performance/create_benchmark_baselines.py {posargs}
 passenv =
     TEST_*
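
With this environment in place, generating a baseline locally reduces to the call below, the same entry point the workflows use; the baseline name and directory are illustrative, matching one matrix entry.

cd tests/integration
tox -e create_benchmark_baselines -- \
    --baseline-directory=tests/performance/benchmarks \
    --baseline-name=ubuntu:22.04-amd64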
