diff --git a/.xrefcheck.yaml b/.xrefcheck.yaml deleted file mode 100644 index a1c87cb6104..00000000000 --- a/.xrefcheck.yaml +++ /dev/null @@ -1,63 +0,0 @@ -# Parameters of repository traversal. -traversal: - # Files and folders which we pretend do not exist - # (so they are neither analyzed nor can be referenced). - ignored: - # Git files - - .git - # Build artifacts - - _build - - _opam - # Git submodules - - src/external - - src/lib/marlin - - src/lib/crypto/proof-systems - - src/lib/snarky - - frontend/wallet/tablecloth - # Unsure of the relevance anymore - - frontend/wallet/README.md - -# Verification parameters. -verification: - # On 'anchor not found' error, how much similar anchors should be displayed as - # hint. Number should be between 0 and 1, larger value means stricter filter. - anchorSimilarityThreshold: 0.5 - - # When checking external references, how long to wait on request before - # declaring "Response timeout". - externalRefCheckTimeout: 10s - - # Prefixes of files, references in which should not be analyzed. - notScanned: - - .github/pull_request_template.md - - .github/issue_template.md - - .github/PULL_REQUEST_TEMPLATE - - .github/ISSUE_TEMPLATE - - # Glob patterns describing the files which do not physically exist in the - # repository but should be treated as existing nevertheless. - virtualFiles: - - ../../../issues - - ../../../issues/* - - ../../../pulls - - ../../../pulls/* - - # POSIX extended regular expressions that match external references - # that have to be ignored (not verified). - # It is an optional parameter, so it can be omitted. - ignoreRefs: - - "https://github.com/.*" # Otherwise Resource unavailable (429 too many requests) - - # Check localhost links. - checkLocalhost: false - - # Skip links which return 403 or 401 code. - ignoreAuthFailures: true - -# Parameters of scanners for various file types. -scanners: - markdown: - # Flavor of markdown, e.g. GitHub-flavor. - # - # This affects which anchors are generated for headers. - flavor: GitHub diff --git a/README.md b/README.md index b848104e1d8..66a29467441 100644 --- a/README.md +++ b/README.md @@ -1,14 +1,18 @@ -### Build status + -| Develop | Berkeley | Compatible | -| ------- | -------- | ---------- | -| [![Build status - develop](https://badge.buildkite.com/0c47452f3ea619d3217d388e0de522b218db28c3e161887a9a.svg?branch=develop)](https://buildkite.com/o-1-labs-2/mina-end-to-end-nightlies) | [![Build status - berkeley](https://badge.buildkite.com/0c47452f3ea619d3217d388e0de522b218db28c3e161887a9a.svg?branch=berkeley)](https://buildkite.com/o-1-labs-2/mina-end-to-end-nightlies) | [![Build status - compatible](https://badge.buildkite.com/0c47452f3ea619d3217d388e0de522b218db28c3e161887a9a.svg?branch=compatible)](https://buildkite.com/o-1-labs-2/mina-end-to-end-nightlies) +

Mina

- - Mina logo - +
-# Mina + ![GitHub stars](https://img.shields.io/github/stars/minaprotocol/mina)  ![GitHub forks](https://img.shields.io/github/forks/minaprotocol/mina) + +![GitHub contributors](https://img.shields.io/github/contributors/minaprotocol/mina)  ![GitHub commit activity](https://img.shields.io/github/commit-activity/m/minaprotocol/mina)  ![GitHub last commit](https://img.shields.io/github/last-commit/minaprotocol/mina) + +| Develop[^1] | Compatible[^2] | Master[^3] | +| ------- | ---------- | ---------- | +| [![Build status - develop](https://badge.buildkite.com/0c47452f3ea619d3217d388e0de522b218db28c3e161887a9a.svg?branch=develop)](https://buildkite.com/o-1-labs-2/mina-end-to-end-nightlies) | [![Build status - compatible](https://badge.buildkite.com/0c47452f3ea619d3217d388e0de522b218db28c3e161887a9a.svg?branch=compatible)](https://buildkite.com/o-1-labs-2/mina-end-to-end-nightlies) | [![Build status - master](https://badge.buildkite.com/0c47452f3ea619d3217d388e0de522b218db28c3e161887a9a.svg?branch=master)](https://buildkite.com/o-1-labs-2/mina-end-to-end-nightlies) + +
Mina is the first cryptocurrency with a lightweight, constant-sized blockchain. This is the main source code repository for the Mina project and contains code for the OCaml protocol implementation, the [Mina Protocol website](https://minaprotocol.com), and wallet. Enjoy! @@ -60,3 +64,7 @@ The [Node Developers](https://docs.minaprotocol.com/node-developers) docs contai [Apache 2.0](LICENSE) Commits older than 2018-10-03 do not have a [LICENSE](LICENSE) file or this notice, but are distributed under the same terms. + +[^1]: Develop is a mainline branch containing code that may not be compatible with the current mainnet and may require a major upgrade (hardfork). +[^2]: Compatible is a mainline branch containing code that can be applied to mainnet without a hardfork. +[^3]: Master is the branch that contains the current mainnet code. diff --git a/automation/services/mina-bp-stats/.gitignore b/automation/services/mina-bp-stats/.gitignore deleted file mode 100644 index 62c893550ad..00000000000 --- a/automation/services/mina-bp-stats/.gitignore +++ /dev/null @@ -1 +0,0 @@ -.idea/ \ No newline at end of file diff --git a/automation/services/mina-bp-stats/ingest-lambda/README.md b/automation/services/mina-bp-stats/ingest-lambda/README.md deleted file mode 100644 index 15ccc99c224..00000000000 --- a/automation/services/mina-bp-stats/ingest-lambda/README.md +++ /dev/null @@ -1,10 +0,0 @@ -# Mina Block Producer Ingest Lambda - -This is a simple ingestion lambda that tags incoming stats data and lands things in a GCS bucket. - -## Configuration - -This lambda takes in 2 environment variables that should be configured in the google console. - -- `TOKEN` - The token used to authenticate incoming requests -- `GOOGLE_STORAGE_BUCKET` - The GCS bucket to store incoming data in diff --git a/automation/services/mina-bp-stats/ingest-lambda/index.js b/automation/services/mina-bp-stats/ingest-lambda/index.js deleted file mode 100644 index 4ddfbe466d9..00000000000 --- a/automation/services/mina-bp-stats/ingest-lambda/index.js +++ /dev/null @@ -1,44 +0,0 @@ -const {Storage} = require('@google-cloud/storage'); - -exports.handleRequest = async (req, res) => { - if (process.env.TOKEN === undefined){ - return res.status(500).send("TOKEN envar not set") - } - if (process.env.GOOGLE_STORAGE_BUCKET === undefined){ - return res.status(500).send("GOOGLE_STORAGE_BUCKET envar not set") - } - - if (!req.query.token || req.query.token !== process.env.TOKEN){ - return res.status(401).send("Bad token") - } - - const now = new Date() - const dateStamp = now.toISOString().split('T')[0] - - const ipAddress = req.headers['x-forwarded-for'] || req.connection.remoteAddress - const receivedAt = now.getTime() - - const recvPayload = req.body - - const bpKeys = recvPayload.daemonStatus.blockProductionKeys - - if (bpKeys.length === 0){ - return res.status(400).send("Invalid block production keys") - } - - const payload = { - receivedAt, - receivedFrom: ipAddress, - blockProducerKey: bpKeys[0], - nodeData: recvPayload - } - - // Upload to gstorage - const storage = new Storage() - const myBucket = storage.bucket(process.env.GOOGLE_STORAGE_BUCKET) - const file = myBucket.file(`${dateStamp}.${now.getTime()}.${recvPayload.blockHeight}.json`) - const contents = JSON.stringify(payload, null, 2) - await file.save(contents, {contentType: "application/json"}) - - return res.status(200).send("OK") -}; diff --git a/automation/services/mina-bp-stats/ingest-lambda/package.json b/automation/services/mina-bp-stats/ingest-lambda/package.json deleted file mode 100644 
index e07d3274999..00000000000 --- a/automation/services/mina-bp-stats/ingest-lambda/package.json +++ /dev/null @@ -1,7 +0,0 @@ -{ - "name": "mina-bp-ingest", - "version": "1.0.0", - "dependencies": { - "@google-cloud/storage": "^5.8.1" - } -} diff --git a/automation/services/mina-bp-stats/sidecar/.gitignore b/automation/services/mina-bp-stats/sidecar/.gitignore deleted file mode 100644 index 12d6a9c3220..00000000000 --- a/automation/services/mina-bp-stats/sidecar/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -*.deb -deb_build diff --git a/automation/services/mina-bp-stats/sidecar/Dockerfile b/automation/services/mina-bp-stats/sidecar/Dockerfile deleted file mode 100644 index c87006499ff..00000000000 --- a/automation/services/mina-bp-stats/sidecar/Dockerfile +++ /dev/null @@ -1,5 +0,0 @@ -FROM python:alpine - -COPY sidecar.py /opt/sidecar.py - -CMD python3 /opt/sidecar.py \ No newline at end of file diff --git a/automation/services/mina-bp-stats/sidecar/README.md b/automation/services/mina-bp-stats/sidecar/README.md deleted file mode 100644 index c18ad9a270f..00000000000 --- a/automation/services/mina-bp-stats/sidecar/README.md +++ /dev/null @@ -1,155 +0,0 @@ -# Mina Block Producer Metrics Sidecar - -This is a simple sidecar that communicates with Mina nodes to ship off uptime data for analysis. - -Unless you're a founding block producer, you shouldn't need to run this sidecar, and you'll need to talk with the Mina team to get a special URL to make it work properly. - -## Configuration - -The sidecar takes 2 approaches to configuration, a pair of envars, or a configuration file. - -**Note**: Environment variables always take precedence, even if the config file is available and valid. - -#### Envars -- `MINA_BP_UPLOAD_URL` - The URL to upload block producer statistics to -- `MINA_NODE_URL` - The URL that the sidecar will reach out to to get statistics from - -#### Config File -[config-file]: #config-file -The mina metrics sidecar will also look at `/etc/mina-sidecar.json` for its configuration variables, and the file should look like this: - -``` -{ - "uploadURL": "https://your.upload.url.here?token=someToken", - "nodeURL": "https://your.mina.node.here:4321" -} -``` - -The `uploadURL` parameter should be given to you by the Mina engineers - -## Running with Docker -Running in docker should be as straight forward as running any other docker image. - -#### Pulling from dockerhub -We push updates to `minaprotocol/mina-bp-stats-sidecar:latest` so you can simply run the following to pull the image down: - -``` -$ docker pull minaprotocol/mina-bp-stats-sidecar:latest -``` - -#### Building locally -This is un-necessary if you use the version from dockerhub (which is recommended). - -If you want to build this image yourself though, you can run `docker build -t mina-sidecar .` in this folder to build the image while naming it "mina-sidecar". - -You should then substitute that in lieu of `minaprotocol/mina-bp-stats-sidecar:latest` for the rest of the commands below. 
- -#### Running with envars -```bash -$ docker run --rm -it -e MINA_BP_UPLOAD_URL=https://some-url-here -e MINA_NODE_URL=https://localhost:4321 minaprotocol/mina-bp-stats-sidecar:latest -``` - -#### Running with a config file -```bash -$ docker run --rm -it -v $(pwd)/mina-sidecar.json:/etc/mina-sidecar.json minaprotocol/mina-bp-stats-sidecar:latest -``` -#### You can even bake your own docker image with the config file already in it -```bash -# Copy the example and make edits -$ cp mina-sidecar-example.json mina-sidecar.json -$ vim mina-sidecar.json # Make edits to the config -# Create custom Dockerfile -$ cat < Dockerfile.custom -FROM minaprotocol/mina-bp-stats-sidecar:latest -COPY your_custom_config.conf /etc/mina-sidecar.json -EOF -$ docker build -t your-custom-sidecar -f Dockerfile.custom . -$ docker run --rm -it your-custom-sidecar -``` - -## Running with debian package - -Running the sidecar as a debian package is as simple as installing the package, editing the config file, and enabling the service. - -#### Installing the package - -This package will install 3 files: - -- `/usr/local/bin/mina-bp-stats-sidecar` (the mina sidecar program) -- `/etc/mina-sidecar.json` (the config file for the mina sidecar) -- `/etc/systemd/system/mina-bp-stats-sidecar.service` (the systemd config to run it as a service) - -Installing the deb directly should be done with `apt install`, which will install the dependencies along side the service: - -``` -$ apt install ./mina-bp-stats-sidecar.deb -``` - -If you prefer to use `dpkg`, you can do so after installing the dependencies: - -``` -$ apt-get update && apt-get install python3 python3-certifi -$ dpkg -i ./mina-bp-stats-sidecar.deb -``` - -#### Configuring and Running - -See the [Config File](#config-file) section above for what should be in the `/etc/mina-sidecar.json` file. - -To (optionally) enable the service to run on reboot you can use: - -``` -$ systemctl enable mina-bp-stats-sidecar -``` - -Then to start the service itself: - -``` -$ service mina-bp-stats-sidecar start -``` - -From there you can check that it's running and see the most recent logs with `service mina-bp-stats-sidecar status`: - -``` -$ service mina-bp-stats-sidecar status -● mina-bp-stats-sidecar.service - Mina Block Producer Stats Sidecar - Loaded: loaded (/etc/systemd/system/mina-bp-stats-sidecar.service; disabled; vendor preset: enabled) - Active: active (running) since Fri 2021-03-12 02:43:37 CET; 3s ago - Main PID: 1906 (python3) - Tasks: 1 (limit: 2300) - CGroup: /system.slice/mina-bp-stats-sidecar.service - └─1906 python3 /usr/local/bin/mina-bp-stats-sidecar - -INFO:root:Found /etc/mina-sidecar.json on the filesystem, using config file -INFO:root:Starting Mina Block Producer Sidecar -INFO:root:Fetching block 2136... -INFO:root:Got block data -INFO:root:Finished! New tip 2136... -``` - -#### Monitoring/Logging - -If you want to get logs from the sidecar service, you can use `journalctl`: - -``` -# Similar to "tail -f" for the sidecar service -$ journalctl -f -u mina-bp-stats-sidecar.service -``` - -## Issues - -#### HTTP error 400 - -If you get a 400 while running your sidecar: - -``` -INFO:root:Fetching block 2136... 
-INFO:root:Got block data -ERROR:root:HTTP Error 400: Bad Request - --- TRACEBACK -- - -ERROR:root:Sleeping for 30s and trying again -``` - -It likely means you're shipping off data to the ingest pipeline without any block producer key configured on your Mina node - since your BP key is your identity we can't accept node data since we don't know who is submitting it! diff --git a/automation/services/mina-bp-stats/sidecar/build.sh b/automation/services/mina-bp-stats/sidecar/build.sh deleted file mode 100755 index 0c243741da9..00000000000 --- a/automation/services/mina-bp-stats/sidecar/build.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash - -BUILDDIR="${BUILDDIR:-deb_build}" - -# Get CWD if run locally or run through "source" -CURRENT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" &> /dev/null && pwd )" - -rm -rf "${BUILDDIR}" - -mkdir -p "${BUILDDIR}/DEBIAN" - -cat << EOF > "${BUILDDIR}/DEBIAN/control" -Package: mina-bp-stats-sidecar -Version: ${MINA_DEB_VERSION} -License: Apache-2.0 -Vendor: none -Architecture: all -Maintainer: o(1)Labs -Installed-Size: -Depends: python3, python3-certifi -Section: base -Priority: optional -Homepage: https://minaprotocol.com/ -Description: A telemetry sidecar that ships stats about node status - back to Mina HQ for analysis. - Built from ${GITHASH} by ${BUILD_URL} -EOF - -mkdir -p "${BUILDDIR}/usr/local/bin" -mkdir -p "${BUILDDIR}/etc" -mkdir -p "${BUILDDIR}/etc/systemd/system/" - -cp "${CURRENT_DIR}/sidecar.py" "${BUILDDIR}/usr/local/bin/mina-bp-stats-sidecar" -cp "${CURRENT_DIR}/mina-sidecar-example.json" "${BUILDDIR}/etc/mina-sidecar.json" -cp "${CURRENT_DIR}/mina-bp-stats-sidecar.service" "${BUILDDIR}/etc/systemd/system/mina-bp-stats-sidecar.service" - -fakeroot dpkg-deb --build "${BUILDDIR}" "mina-sidecar_${MINA_DEB_VERSION}.deb" - -rm -rf "${BUILDDIR}" diff --git a/automation/services/mina-bp-stats/sidecar/mina-bp-stats-sidecar.service b/automation/services/mina-bp-stats/sidecar/mina-bp-stats-sidecar.service deleted file mode 100644 index d7fc212813c..00000000000 --- a/automation/services/mina-bp-stats/sidecar/mina-bp-stats-sidecar.service +++ /dev/null @@ -1,7 +0,0 @@ -[Unit] -Description=Mina Block Producer Stats Sidecar -[Service] -ExecStart=/usr/local/bin/mina-bp-stats-sidecar -SuccessExitStatus=143 -[Install] -WantedBy=multi-user.target \ No newline at end of file diff --git a/automation/services/mina-bp-stats/sidecar/mina-sidecar-example.json b/automation/services/mina-bp-stats/sidecar/mina-sidecar-example.json deleted file mode 100644 index 179a5c8c708..00000000000 --- a/automation/services/mina-bp-stats/sidecar/mina-sidecar-example.json +++ /dev/null @@ -1,4 +0,0 @@ -{ - "uploadURL": "https://some-host.somewhere/some-endpoing?token=some-token", - "nodeURL": "https://some.node.somewhere:3085" -} diff --git a/automation/services/mina-bp-stats/sidecar/sidecar.py b/automation/services/mina-bp-stats/sidecar/sidecar.py deleted file mode 100755 index 2b356ca9329..00000000000 --- a/automation/services/mina-bp-stats/sidecar/sidecar.py +++ /dev/null @@ -1,182 +0,0 @@ -#!/usr/bin/env python3 - -import os -import json -import logging -import time -import math -import urllib.request -import urllib.parse - -logging.basicConfig(level=logging.INFO) - -MINA_CONFIG_FILE = '/etc/mina-sidecar.json' -MINA_BLOCK_PRODUCER_URL_ENVAR = 'MINA_BP_UPLOAD_URL' -MINA_NODE_URL_ENVAR = 'MINA_NODE_URL' - -FETCH_INTERVAL = 60 * 3 # Fetch updates every 3 mins -ERROR_SLEEP_INTERVAL = 30 # On errors, sleep for 30s before trying again -FINALIZATION_THRESHOLD = 12 # 12 blocks 
back is considered "finalized" - -SYNC_STATUS_GRAPHQL = ''' -query SyncStatus { - daemonStatus { - syncStatus - blockchainLength - } -} -''' - -FETCH_BLOCK_GRAPHQL = ''' -query FetchBlockData($blockID: Int!) { - version - daemonStatus { - blockchainLength - syncStatus - chainId - commitId - highestBlockLengthReceived - highestUnvalidatedBlockLengthReceived - stateHash - blockProductionKeys - uptimeSecs - } - block(height: $blockID) { - stateHash - } -} -''' - -upload_url, node_url = (None, None) - -if os.path.exists(MINA_CONFIG_FILE): - logging.info("Found {} on the filesystem, using config file".format(MINA_CONFIG_FILE)) - with open(MINA_CONFIG_FILE) as f: - config_file = f.read().strip() - parsed_config_file = json.loads(config_file) - upload_url = parsed_config_file['uploadURL'].rstrip('/') - node_url = parsed_config_file['nodeURL'].rstrip('/') - -if MINA_BLOCK_PRODUCER_URL_ENVAR in os.environ: - logging.info("Found {} in the environment, using envar".format(MINA_BLOCK_PRODUCER_URL_ENVAR)) - upload_url = os.environ[MINA_BLOCK_PRODUCER_URL_ENVAR] - -if MINA_NODE_URL_ENVAR in os.environ: - logging.info("Found {} in the environment, using envar".format(MINA_NODE_URL_ENVAR)) - node_url = os.environ[MINA_NODE_URL_ENVAR] - -if upload_url is None: - raise Exception("Could not find {} or {} environment variable is not set.".format(MINA_CONFIG_FILE, MINA_BLOCK_PRODUCER_URL_ENVAR)) - -if node_url is None: - raise Exception("Could not find {} or {} environment variable is not set.".format(MINA_CONFIG_FILE, MINA_NODE_URL_ENVAR)) - -def fetch_mina_status(): - url = node_url + '/graphql' - request = urllib.request.Request( - url, - headers={'Content-Type': 'application/json'}, - data=json.dumps({ - "query": SYNC_STATUS_GRAPHQL, - "variables": {}, - "operationName": "SyncStatus" - }).encode() - ) - response = urllib.request.urlopen(request) - response_body = response.read().decode('utf-8') - parsed_body = json.loads(response_body)['data'] - - return parsed_body['daemonStatus']['syncStatus'], parsed_body['daemonStatus']['blockchainLength'] - -def fetch_block(block_id): - url = node_url + '/graphql' - request = urllib.request.Request( - url, - headers={'Content-Type': 'application/json'}, - data=json.dumps({ - "query": FETCH_BLOCK_GRAPHQL, - "variables": {'blockID': block_id}, - "operationName": "FetchBlockData" - }).encode() - ) - - response = urllib.request.urlopen(request) - response_body = response.read().decode('utf-8') - response_data = json.loads(response_body)['data'] - if response_data is None: - raise Exception("Response seems to be an error! {}".format(response_body)) - - return response_data - -def send_update(block_data, block_height): - block_data.update({ - "retrievedAt": math.floor(time.time() * 1000), - "blockHeight": block_height - }) - request = urllib.request.Request( - upload_url, - headers={'Content-Type': 'application/json'}, - data=json.dumps(block_data).encode() - ) - - response = urllib.request.urlopen(request) - - assert response.getcode() == 200, "Non-200 from BP flush endpoint! [{}] - ".format(response.getcode(), response.read()) - -def check_mina_node_sync_state_and_fetch_head(): - while True: - try: - mina_sync_status, current_head = fetch_mina_status() - if mina_sync_status == "SYNCED": - logging.debug("Mina sync status is acceptable ({}), continuing!".format(mina_sync_status)) - break - logging.info("Mina sync status is {}. 
Sleeping for 5s and trying again".format(mina_sync_status)) - except Exception as fetch_exception: - logging.exception(fetch_exception) - - time.sleep(5) - - return current_head - -if __name__ == '__main__': - logging.info("Starting Mina Block Producer Sidecar") - - # On init ensure our node is synced and happy - head_block_id = check_mina_node_sync_state_and_fetch_head() - - # Go back FINALIZATION_THRESHOLD blocks from the tip to have a finalized block - current_finalized_tip = head_block_id - FINALIZATION_THRESHOLD - - # We're done with init to the point where we can start shipping off data - while True: - try: - logging.info("Fetching block {}...".format(current_finalized_tip)) - - block_data = fetch_block(current_finalized_tip) - - logging.info("Got block data ", block_data) - - send_update(block_data, current_finalized_tip) - - current_finalized_tip = block_data['daemonStatus']['blockchainLength'] - FINALIZATION_THRESHOLD # Go set a new finalized block - - logging.info("Finished! New tip {}...".format(current_finalized_tip)) - - time.sleep(FETCH_INTERVAL) - except Exception as e: - # If we encounter an error at all, log it, sleep, and then kick - # off the init process to go fetch the current tip/head to ensure - # we never try to fetch past 290 blocks (k=290) - logging.exception(e) - - logging.error("Sleeping for {}s and trying again".format(ERROR_SLEEP_INTERVAL)) - - time.sleep(ERROR_SLEEP_INTERVAL) - - head_block_id = check_mina_node_sync_state_and_fetch_head() - - logging.info("Found new head at {}".format(head_block_id)) - - current_finalized_tip = head_block_id - FINALIZATION_THRESHOLD - - logging.info("Continuing with finalized tip block of {}".format(current_finalized_tip)) diff --git a/buildkite/Makefile b/buildkite/Makefile index 33de08c0af8..eff56f84029 100644 --- a/buildkite/Makefile +++ b/buildkite/Makefile @@ -14,4 +14,22 @@ lint: find ./src/ -name "*.dhall" -print0 | xargs -I{} -0 -n1 bash -c 'echo "{}" && dhall --ascii lint --inplace {} || exit 255' format: - find ./src/ -name "*.dhall" -print0 | xargs -I{} -0 -n1 bash -c 'echo "{}" && dhall --ascii format --inplace {} || exit 255' \ No newline at end of file + find ./src/ -name "*.dhall" -print0 | xargs -I{} -0 -n1 bash -c 'echo "{}" && dhall --ascii format --inplace {} || exit 255' + +dump_pipelines: + $(eval TMP := $(shell mktemp -d)) + scripts/dhall/dump_dhall_to_pipelines.sh src/Jobs "$(TMP)" + +check_deps: dump_pipelines + python3 scripts/dhall/checker.py --root "$(TMP)" deps + +check_dirty: dump_pipelines + python3 scripts/dhall/checker.py --root "$(TMP)" dirty-when --repo "$(PWD)/../" + +check_dups: dump_pipelines + python3 scripts/dhall/checker.py --root "$(TMP)" dups + +check_names: dump_pipelines + python3 scripts/dhall/checker.py --root "$(TMP)" names + +all: check_syntax lint format check_deps check_dirty check_dups check_names \ No newline at end of file diff --git a/buildkite/scripts/bench/install.sh b/buildkite/scripts/bench/install.sh new file mode 100644 index 00000000000..ea06e6ee02b --- /dev/null +++ b/buildkite/scripts/bench/install.sh @@ -0,0 +1,18 @@ +#!/bin/bash + +set -eo pipefail + +# Don't prompt for answers during apt-get install +export DEBIAN_FRONTEND=noninteractive + +sudo apt-get update +sudo apt-get install -y git apt-transport-https ca-certificates tzdata curl python3 + +TESTNET_NAME="berkeley" + +git config --global --add safe.directory /workdir +source buildkite/scripts/export-git-env-vars.sh + +source buildkite/scripts/debian/install.sh "mina-test-suite,mina-$TESTNET_NAME" 1 + +pip3 
install -r scripts/benchmarks/requirements.txt \ No newline at end of file diff --git a/buildkite/scripts/bench/snark_transaction_profiler.sh b/buildkite/scripts/bench/snark_transaction_profiler.sh new file mode 100755 index 00000000000..2436efb92d4 --- /dev/null +++ b/buildkite/scripts/bench/snark_transaction_profiler.sh @@ -0,0 +1,12 @@ +#!/bin/bash + +set -eo pipefail + +source buildkite/scripts/bench/install.sh + +K=1 +MAX_NUM_UPDATES=4 +MIN_NUM_UPDATES=2 + +echo "-- Run Snark Transaction Profiler with parameters: --k ${K} --max-num-updates ${MAX_NUM_UPDATES} --min-num-updates ${MIN_NUM_UPDATES}" +python3 ./scripts/benchmarks run --benchmark snark --k ${K} --max-num-updates ${MAX_NUM_UPDATES} --min-num-updates ${MIN_NUM_UPDATES} --outfile snark.out diff --git a/buildkite/scripts/bench/zkapp_metrics.sh b/buildkite/scripts/bench/zkapp_metrics.sh new file mode 100755 index 00000000000..829ece87cb1 --- /dev/null +++ b/buildkite/scripts/bench/zkapp_metrics.sh @@ -0,0 +1,9 @@ +#!/bin/bash + +set -eo pipefail + +source buildkite/scripts/bench/install.sh + +python3 ./scripts/benchmarks run --benchmark zkapp --outfile zkapp.out + +python3 ./scripts/benchmarks run --benchmark heap-usage --outfile heap-usage.out \ No newline at end of file diff --git a/buildkite/scripts/build-artifact.sh b/buildkite/scripts/build-artifact.sh index 263a258ef3c..eddb5f159c0 100755 --- a/buildkite/scripts/build-artifact.sh +++ b/buildkite/scripts/build-artifact.sh @@ -17,14 +17,13 @@ else fi -# TODO: Stop building lib_p2p multiple times by pulling from buildkite-agent artifacts or docker or somewhere -echo "--- Build libp2p_helper TODO: use the previously uploaded build artifact" +echo "--- Build libp2p_helper" make -C src/app/libp2p_helper MAINNET_TARGETS="" [[ ${MINA_BUILD_MAINNET} ]] && MAINNET_TARGETS="src/app/cli/src/mina_mainnet_signatures.exe src/app/rosetta/rosetta_mainnet_signatures.exe src/app/rosetta/ocaml-signer/signer_mainnet_signatures.exe" -echo "--- Build all major tagets required for packaging" +echo "--- Build all major targets required for packaging" echo "Building from Commit SHA: ${MINA_COMMIT_SHA1}" echo "Rust Version: $(rustc --version)" dune build "--profile=${DUNE_PROFILE}" $INSTRUMENTED_PARAM \ @@ -45,4 +44,10 @@ dune build "--profile=${DUNE_PROFILE}" $INSTRUMENTED_PARAM \ src/app/rosetta/indexer_test/indexer_test.exe \ src/app/rosetta/ocaml-signer/signer_testnet_signatures.exe \ src/app/test_executive/test_executive.exe \ - src/test/command_line_tests/command_line_tests.exe # 2>&1 | tee /tmp/buildocaml.log + src/app/benchmarks/benchmarks.exe \ + src/app/ledger_export_bench/ledger_export_benchmark.exe \ + src/app/disk_caching_stats/disk_caching_stats.exe \ + src/app/heap_usage/heap_usage.exe \ + src/app/zkapp_limits/zkapp_limits.exe \ + src/test/command_line_tests/command_line_tests.exe \ + src/test/archive/patch_archive_test/patch_archive_test.exe diff --git a/buildkite/scripts/dhall/checker.py b/buildkite/scripts/dhall/checker.py new file mode 100755 index 00000000000..50ad95acd8c --- /dev/null +++ b/buildkite/scripts/dhall/checker.py @@ -0,0 +1,252 @@ +""" + Runs dhall checks like: + + - validate if all dependencies in jobs are covered + + python3 buildkite/scripts/dhall/checker.py --root ./buildkite/src/Jobs deps + + - all dirtyWhen entries relate to existing files + + python3 buildkite/scripts/dhall/checker.py --root ./buildkite/src/Jobs dirty-when + + - print commands for a given job + + python3 buildkite/scripts/dhall/checker.py --root ./buildkite/src/Jobs print-cmd --job 
SingleNodeTest +""" + + +import argparse +import subprocess +import os +from glob import glob +import tempfile +from pathlib import Path +import yaml + + +class CmdColors: + HEADER = '\033[95m' + OKBLUE = '\033[94m' + OKCYAN = '\033[96m' + OKGREEN = '\033[92m' + WARNING = '\033[93m' + FAIL = '\033[91m' + ENDC = '\033[0m' + BOLD = '\033[1m' + UNDERLINE = '\033[4m' + + +class PipelineInfoBuilder: + + def __init__(self, temp, file): + with open(f"{temp}/{file}") as stream: + try: + self.pipeline = yaml.safe_load(stream) + self.file = file + except yaml.YAMLError as exc: + print(f"cannot parse {temp}/{file} correctly, due to {exc}") + exit(1) + + def get_steps(self): + steps = [] + for step in self.pipeline["pipeline"]["steps"]: + key = step["key"] + deps = [] + if "depends_on" in step: + for dependsOn in step["depends_on"]: + deps.append(dependsOn["step"]) + commands = step["commands"] + steps.append(Step(key, deps, commands)) + return steps + + def get_dirty(self): + dirty = [] + for dirtyWhen in self.pipeline["spec"]["dirtyWhen"]: + path = dirtyWhen["dir"][0] if "dir" in dirtyWhen else "" + exts = dirtyWhen["exts"][0] if "exts" in dirtyWhen else "" + strictEnd = bool(dirtyWhen["strictEnd"]) if ( + "strictEnd" in dirtyWhen) else False + strictStart = bool(dirtyWhen["strictStart"]) if ( + "strictStart" in dirtyWhen) else False + dirty.append(DirtyWhen(path=path, strictStart=strictStart, + strictEnd=strictEnd, extension=exts)) + return dirty + + def build(self): + name = self.pipeline["spec"]["name"] + steps = self.get_steps() + dirty = self.get_dirty() + return PipelineInfo(self.file, self.pipeline, name, steps, dirty) + + +class DirtyWhen: + + def __init__(self, path, extension, strictStart, strictEnd): + self.path = path + self.extension = extension + self.strictStart = strictStart + self.strictEnd = strictEnd + + def calculate_path(self,repo): + if not self.path: + return glob(os.path.join(repo,f'**/*{self.extension}')) + if not self.extension: + if self.strictEnd and self.strictStart: + return glob(os.path.join(repo, f'{self.path}')) + if not self.strictEnd and self.strictStart: + return glob(os.path.join(repo, f'{self.path}*')) + if not self.strictStart and self.strictEnd: + return glob(os.path.join(repo, f'**/{self.path}'), recursive= True) + if not self.strictStart and not self.strictEnd: + return glob(os.path.join(repo, f'*{self.path}*')) + return glob(os.path.join(repo, f'{self.path}.{self.extension}')) + + def __str__(self): + return f"path: '{self.path}', exts: '{self.extension}', strictStart:{self.strictStart}, strictEnd:{self.strictEnd}" + + +class Step: + + def __init__(self, key, deps, commands): + self.key = key + self.deps = deps + self.commands = commands + + +class PipelineInfo: + + def __init__(self, file, pipeline, name, steps, dirty): + self.file = file + self.name = name + self.pipeline = pipeline + self.steps = steps + self.dirty = dirty + + def keys(self): + return [step.key for step in self.steps] + + +parser = argparse.ArgumentParser(description='Runs dhall pipeline checks') +parser.add_argument("--root", required=True, + help="root folder where all dhall files reside") + +subparsers = parser.add_subparsers(dest="cmd") +dirty_when = subparsers.add_parser('dirty-when') +dirty_when.add_argument("--repo", required=True, + help="root folder for mina repo") + +subparsers.add_parser('deps') + + +run = subparsers.add_parser('print-cmd') +run.add_argument("--job", required=True, help="job to print") +run.add_argument("--step", required=False, help="step to print") + 
+subparsers.add_parser('dups') + +subparsers.add_parser('names') + +args = parser.parse_args() + +pipelinesInfo = [PipelineInfoBuilder(args.root, file).build() + for file in os.listdir(path=args.root)] + +if args.cmd == "deps": + + keys = [] + for pipeline in pipelinesInfo: + keys.extend(pipeline.keys()) + + failedSteps = [] + + for pipeline in pipelinesInfo: + for step in pipeline.steps: + for dep in step.deps: + if dep not in keys: + failedSteps.append((pipeline, step, dep)) + + if any(failedSteps): + print("Fatal: Missing dependency resolution found:") + for (pipeline, step, dep) in failedSteps: + file = str.replace(pipeline.file, ".yml", ".dhall") + print( + f"\t{CmdColors.FAIL}[FATAL] Unresolved dependency for step '{step.key}' in '{file}' depends on non-existent job '{dep}'{CmdColors.ENDC}") + exit(1) + else: + print('Pipeline definitions correct') + +if args.cmd == "print-cmd": + pipeline = next(filter(lambda x: args.job in x.file, pipelinesInfo)) + + def get_steps(): + if args.step: + return [next(filter(lambda x: args.step in x.key, pipeline.steps))] + else: + return pipeline.steps + + steps = get_steps() + + for step in steps: + for command in step.commands: + if not command.startswith("echo"): + print(command) + +if args.cmd == "dirty-when": + + failedSteps = [] + + for pipeline in pipelinesInfo: + for dirty in pipeline.dirty: + if not bool(dirty.calculate_path(args.repo)): + failedSteps.append((pipeline, dirty)) + + if any(failedSteps): + print("Fatal: Non-existent dirtyWhen path detected:") + for (pipeline, dirty) in failedSteps: + file = str.replace(pipeline.file, ".yml", ".dhall") + print( + f"\t{CmdColors.FAIL}[FATAL] Unresolved dirtyWhen path in '{file}' ('{str(dirty)}'){CmdColors.ENDC}") + exit(1) + else: + print('Pipeline definitions correct') + +if args.cmd == "dups": + + unique_names = set() + dups = [] + + for pipeline in pipelinesInfo: + for step in pipeline.steps: + before = len(unique_names) + unique_names.add(step.key) + if len(unique_names) == before: + dups.append((pipeline,step.key)) + + if any(dups): + print("Fatal: Step name duplication detected:") + for pipeline,step in dups: + file = str.replace(pipeline.file, ".yml", ".dhall") + print( + f"\t{CmdColors.FAIL}[FATAL] Step with name '{step}' in '{file}' is defined more than once{CmdColors.ENDC}") + exit(1) + else: + print('Pipeline definitions correct') + +if args.cmd == "names": + invalid = [] + + for pipeline in pipelinesInfo: + stem = str.replace(pipeline.file, ".yml", "") + if pipeline.name != stem: + invalid.append(pipeline) + + if any(invalid): + print("Fatal: Invalid pipeline name detected:") + for pipeline in invalid: + file = str.replace(pipeline.file, ".yml", ".dhall") + print( + f"\t{CmdColors.FAIL}[FATAL] Job name '{pipeline.name}' in '{file}' is incorrect. 
" + f"Pipeline name (spec.name) and pipeline filename should match {CmdColors.ENDC}") + exit(1) + else: + print('Pipelines definitions correct') diff --git a/buildkite/scripts/dhall/dump_dhall_to_pipelines.sh b/buildkite/scripts/dhall/dump_dhall_to_pipelines.sh new file mode 100755 index 00000000000..84193329b76 --- /dev/null +++ b/buildkite/scripts/dhall/dump_dhall_to_pipelines.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +ROOT=$1 +OUTPUT=$2 + +mkdir -p "$OUTPUT" + +shopt -s globstar nullglob + +echo "Dumping pipelines from '$ROOT' to '$OUTPUT'" + +COUNTER=0 + +for file in "$ROOT"/**/*.dhall +do + filename=$(basename "$file") + filename="${filename%.*}" + + dhall-to-yaml --quoted --file "$file" > "$OUTPUT"/"$filename".yml + + COUNTER=$((COUNTER+1)) +done + +echo "Done. $COUNTER jobs exported" diff --git a/buildkite/scripts/dump-mina-type-shapes.sh b/buildkite/scripts/dump-mina-type-shapes.sh index 57d3c2b2302..5c1d402e215 100755 --- a/buildkite/scripts/dump-mina-type-shapes.sh +++ b/buildkite/scripts/dump-mina-type-shapes.sh @@ -20,3 +20,5 @@ export TYPE_SHAPE_FILE=${MINA_COMMIT_SHA1}-type_shape.txt echo "--- Create type shapes git note for commit: ${MINA_COMMIT_SHA1}" mina internal dump-type-shapes > ${TYPE_SHAPE_FILE} + +source buildkite/scripts/gsutil-upload.sh ${TYPE_SHAPE_FILE} gs://mina-type-shapes \ No newline at end of file diff --git a/buildkite/scripts/fuzzy-zkapp-test.sh b/buildkite/scripts/fuzzy-zkapp-test.sh index 04b2ebb7e78..bc62bcd4a79 100755 --- a/buildkite/scripts/fuzzy-zkapp-test.sh +++ b/buildkite/scripts/fuzzy-zkapp-test.sh @@ -21,7 +21,7 @@ export LIBP2P_NIXLESS=1 PATH=/usr/lib/go/bin:$PATH GO=/usr/lib/go/bin/go # skip running all of the tests that have already succeeded, since dune will # only retry those tests that failed. echo "--- Run fuzzy zkapp tests" -time dune exec "${path}" --profile="${profile}" -j16 -- --timeout "${timeout}" --individual-test-timeout "${individual_test_timeout}" --seed "${RANDOM}" +time dune exec "${path}" --profile="${profile}" -- --timeout "${timeout}" --individual-test-timeout "${individual_test_timeout}" --seed "${RANDOM}" STATUS=$? if [ "$STATUS" -ne 0 ]; then ./scripts/link-coredumps.sh && exit "$STATUS" diff --git a/buildkite/scripts/gsutil-upload.sh b/buildkite/scripts/gsutil-upload.sh new file mode 100755 index 00000000000..347ed3e38bd --- /dev/null +++ b/buildkite/scripts/gsutil-upload.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +KEY_FILE=/var/secrets/google/key.json + +if [ ! 
-f $KEY_FILE ]; then + echo "Cannot use gsutil for upload as key file cannot be found in $KEY_FILE" + exit 1 +fi + +gcloud auth activate-service-account --key-file=$KEY_FILE + +gsutil cp $1 $2 \ No newline at end of file diff --git a/buildkite/scripts/promote-docker.sh b/buildkite/scripts/promote-docker.sh index 683bbc28acc..b59e9a61043 100755 --- a/buildkite/scripts/promote-docker.sh +++ b/buildkite/scripts/promote-docker.sh @@ -4,6 +4,7 @@ set -eo pipefail CLEAR='\033[0m' RED='\033[0;31m' +PUBLISH=0 while [[ "$#" -gt 0 ]]; do case $1 in -n|--name) NAME="$2"; shift;; @@ -45,7 +46,7 @@ docker pull ${GCR_REPO}/${NAME}:${VERSION} source buildkite/scripts/export-git-env-vars.sh -if [[ -v PUBLISH ]]; then +if [[ $PUBLISH == 1 ]]; then TARGET_REPO=docker.io/minaprotocol docker tag ${GCR_REPO}/${NAME}:${VERSION} ${TARGET_REPO}/${NAME}:${TAG} docker push "${TARGET_REPO}/${NAME}:${TAG}" diff --git a/buildkite/scripts/run-snark-transaction-profiler.sh b/buildkite/scripts/run-snark-transaction-profiler.sh deleted file mode 100755 index 802cd730632..00000000000 --- a/buildkite/scripts/run-snark-transaction-profiler.sh +++ /dev/null @@ -1,23 +0,0 @@ -#!/bin/bash - -set -eo pipefail - -# Don't prompt for answers during apt-get install -export DEBIAN_FRONTEND=noninteractive - -sudo apt-get update -sudo apt-get install -y git apt-transport-https ca-certificates tzdata curl python3 - -TESTNET_NAME="berkeley" - -git config --global --add safe.directory /workdir -source buildkite/scripts/export-git-env-vars.sh - -source buildkite/scripts/debian/install.sh "mina-${TESTNET_NAME}" 1 - -K=1 -MAX_NUM_UPDATES=4 -MIN_NUM_UPDATES=2 - -echo "-- Run Snark Transaction Profiler with parameters: --zkapps --k ${K} --max-num-updates ${MAX_NUM_UPDATES} --min-num-updates ${MIN_NUM_UPDATES}" -python3 ./scripts/snark_transaction_profiler.py ${K} ${MAX_NUM_UPDATES} ${MIN_NUM_UPDATES} diff --git a/buildkite/scripts/run_verify_promoted_build_job.sh b/buildkite/scripts/run_verify_promoted_build_job.sh index da2a3789d3f..713749f642f 100755 --- a/buildkite/scripts/run_verify_promoted_build_job.sh +++ b/buildkite/scripts/run_verify_promoted_build_job.sh @@ -107,4 +107,4 @@ if [[ "${REMOVE_PROFILE_FROM_NAME}" -eq 0 ]]; then else REMOVE_PROFILE_FROM_NAME="True" fi -echo $PROMOTE_PACKAGE_DHALL_DEF'.verify_artifacts '"$DHALL_DEBIANS"' '"$DHALL_DOCKERS"' "'"${NEW_VERSION}"'" '$PROFILES_DHALL_DEF'.Type.'"${PROFILE}"' '$NETWORK_DHALL_DEF'.Type.'"${NETWORK}"' '"${DHALL_CODENAMES}"' '$DEBIAN_CHANNEL_DHALL_DEF'.Type.'"${TO_CHANNEL}"' "'"${TAG}"'" '${REMOVE_PROFILE_FROM_NAME}' '${DHALL_PUBLISH}' ' | dhall-to-yaml --quoted +echo $PROMOTE_PACKAGE_DHALL_DEF'.verify_artifacts '"$DHALL_DEBIANS"' '"$DHALL_DOCKERS"' "'"${NEW_VERSION}"'" '$PROFILES_DHALL_DEF'.Type.'"${PROFILE}"' '$NETWORK_DHALL_DEF'.Type.'"${NETWORK}"' '"${DHALL_CODENAMES}"' '$DEBIAN_CHANNEL_DHALL_DEF'.Type.'"${TO_CHANNEL}"' "'"${NEW_VERSION}"'" '${REMOVE_PROFILE_FROM_NAME}' '${DHALL_PUBLISH}' ' | dhall-to-yaml --quoted diff --git a/buildkite/scripts/setup-database-for-archive-node.sh b/buildkite/scripts/setup-database-for-archive-node.sh index 9aa9062b223..cf494a1ffaa 100755 --- a/buildkite/scripts/setup-database-for-archive-node.sh +++ b/buildkite/scripts/setup-database-for-archive-node.sh @@ -5,6 +5,7 @@ set -euo pipefail user=$1 password=$2 db=$3 +port=$4 sudo service postgresql start @@ -12,4 +13,4 @@ sudo -u postgres psql -c "CREATE USER ${user} WITH LOGIN SUPERUSER PASSWORD '${p sudo pg_isready service postgresql status sudo -u postgres createdb -O $user $db -PGPASSWORD=$password psql 
-h localhost -p 5434 -U $user -d $db -a -f src/app/archive/create_schema.sql +PGPASSWORD=$password psql -h localhost -p $port -U $user -d $db -a -f src/app/archive/create_schema.sql diff --git a/buildkite/scripts/unit-test.sh b/buildkite/scripts/unit-test.sh index de885193086..4cb4c5d29bd 100755 --- a/buildkite/scripts/unit-test.sh +++ b/buildkite/scripts/unit-test.sh @@ -21,10 +21,10 @@ export LIBP2P_NIXLESS=1 PATH=/usr/lib/go/bin:$PATH GO=/usr/lib/go/bin/go time make build echo "--- Build all targets" -dune build "${path}" --profile="${profile}" -j16 +dune build "${path}" --profile="${profile}" echo "--- Check for changes to verification keys" -time dune runtest "src/app/print_blockchain_snark_vk" --profile="${profile}" -j16 +time dune runtest "src/app/print_blockchain_snark_vk" --profile="${profile}" # Turn on the proof-cache assertion, so that CI will fail if the proofs need to # be updated. @@ -35,8 +35,8 @@ export ERROR_ON_PROOF=true # skip running all of the tests that have already succeeded, since dune will # only retry those tests that failed. echo "--- Run unit tests" -time dune runtest "${path}" --profile="${profile}" -j16 || \ +time dune runtest "${path}" --profile="${profile}" || \ (./scripts/link-coredumps.sh && \ echo "--- Retrying failed unit tests" && \ - time dune runtest "${path}" --profile="${profile}" -j16 || \ + time dune runtest "${path}" --profile="${profile}" || \ (./scripts/link-coredumps.sh && false)) diff --git a/buildkite/scripts/version-linter-patch-missing-type-shapes.sh b/buildkite/scripts/version-linter-patch-missing-type-shapes.sh new file mode 100755 index 00000000000..74c60dc8bd5 --- /dev/null +++ b/buildkite/scripts/version-linter-patch-missing-type-shapes.sh @@ -0,0 +1,46 @@ +#!/bin/bash + +set -eox pipefail + +if [[ $# -ne 1 ]]; then + echo "Usage: $0 " + exit 1 +fi + +git config --global --add safe.directory /workdir + +source buildkite/scripts/handle-fork.sh +source buildkite/scripts/export-git-env-vars.sh + +release_branch=${REMOTE}/$1 + +RELEASE_BRANCH_COMMIT=$(git log -n 1 --format="%h" --abbrev=7 $release_branch) + +function revert_checkout() { + git checkout $BUILDKITE_COMMIT + git submodule sync + git submodule update --init --recursive +} + +function checkout_and_dump() { + local __commit=$1 + git checkout $__commit + git submodule sync + git submodule update --init --recursive + eval $(opam config env) + TYPE_SHAPE_FILE=${__commit:0:7}-type_shape.txt + dune exec src/app/cli/src/mina.exe internal dump-type-shapes > /tmp/${TYPE_SHAPE_FILE} + revert_checkout + source buildkite/scripts/gsutil-upload.sh /tmp/${TYPE_SHAPE_FILE} gs://mina-type-shapes +} + +if ! gsutil ls gs://mina-type-shapes/$RELEASE_BRANCH_COMMIT 2>/dev/null; then + checkout_and_dump $RELEASE_BRANCH_COMMIT +fi + +if [[ -n "${BUILDKITE_PULL_REQUEST_BASE_BRANCH:-}" ]]; then + BUILDKITE_PULL_REQUEST_BASE_BRANCH_COMMIT=$(git log -n 1 --format="%h" --abbrev=7 ${REMOTE}/${BUILDKITE_PULL_REQUEST_BASE_BRANCH} ) + if ! 
gsutil ls gs://mina-type-shapes/$BUILDKITE_PULL_REQUEST_BASE_BRANCH_COMMIT 2>/dev/null; then + checkout_and_dump $BUILDKITE_PULL_REQUEST_BASE_BRANCH_COMMIT + fi +fi \ No newline at end of file diff --git a/buildkite/scripts/zkapp_metrics.sh b/buildkite/scripts/zkapp_metrics.sh deleted file mode 100755 index b943465c96d..00000000000 --- a/buildkite/scripts/zkapp_metrics.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -set -eou pipefail - -eval $(opam config env) && export PATH=$HOME/.cargo/bin:$PATH && ./scripts/zkapp_metrics.sh diff --git a/buildkite/src/Command/Base.dhall b/buildkite/src/Command/Base.dhall index 32ae7f2a820..deefb5a1742 100644 --- a/buildkite/src/Command/Base.dhall +++ b/buildkite/src/Command/Base.dhall @@ -131,6 +131,7 @@ let targetToAgent = , Integration = toMap { size = "integration" } , QA = toMap { size = "qa" } , Hardfork = toMap { size = "hardfork" } + , Perf = toMap { size = "perf" } , Multi = toMap { size = "generic-multi" } } target diff --git a/buildkite/src/Command/Bench/Base.dhall b/buildkite/src/Command/Bench/Base.dhall new file mode 100644 index 00000000000..3d2431943b6 --- /dev/null +++ b/buildkite/src/Command/Bench/Base.dhall @@ -0,0 +1,96 @@ +let PipelineMode = ../../Pipeline/Mode.dhall + +let PipelineTag = ../../Pipeline/Tag.dhall + +let Pipeline = ../../Pipeline/Dsl.dhall + +let JobSpec = ../../Pipeline/JobSpec.dhall + +let DebianVersions = ../../Constants/DebianVersions.dhall + +let RunInToolchain = ../../Command/RunInToolchain.dhall + +let Profiles = ../../Constants/Profiles.dhall + +let Command = ../../Command/Base.dhall + +let Docker = ../../Command/Docker/Type.dhall + +let Size = ../Size.dhall + +let Benchmarks = ../../Constants/Benchmarks.dhall + +let SelectFiles = ../../Lib/SelectFiles.dhall + +let Spec = + { Type = + { key : Text + , bench : Text + , label : Text + , size : Size + , name : Text + , path : Text + , mode : PipelineMode.Type + , dependsOn : List Command.TaggedKey.Type + , additionalDirtyWhen : List SelectFiles.Type + , yellowThreshold : Double + , redThreshold : Double + } + , default = + { mode = PipelineMode.Type.PullRequest + , size = Size.Medium + , dependsOn = + DebianVersions.dependsOn + DebianVersions.DebVersion.Bullseye + Profiles.Type.Standard + , additionalDirtyWhen = [] : List SelectFiles.Type + , yellowThreshold = 0.1 + , redThreshold = 0.2 + } + } + +let command + : Spec.Type -> Command.Type + = \(spec : Spec.Type) + -> Command.build + Command.Config::{ + , commands = + RunInToolchain.runInToolchain + (Benchmarks.toEnvList Benchmarks.Type::{=}) + "./buildkite/scripts/benchmarks.sh ${spec.bench} --red-threshold ${Double/show + spec.redThreshold} --yellow-threshold ${Double/show + spec.yellowThreshold}" + , label = "Perf: ${spec.label}" + , key = spec.key + , target = spec.size + , docker = None Docker.Type + , depends_on = spec.dependsOn + } + +let pipeline + : Spec.Type -> Pipeline.Config.Type + = \(spec : Spec.Type) + -> Pipeline.Config::{ + , spec = JobSpec::{ + , dirtyWhen = + [ SelectFiles.strictlyStart (SelectFiles.contains "src") + , SelectFiles.exactly + "buildkite/src/Command/Bench/Base" + "dhall" + , SelectFiles.contains "scripts/benchmark" + , SelectFiles.contains "buildkite/scripts/benchmark" + ] + # spec.additionalDirtyWhen + , path = spec.path + , name = spec.name + , mode = spec.mode + , tags = + [ PipelineTag.Type.Long + , PipelineTag.Type.Test + , PipelineTag.Type.Stable + ] + } + , steps = [ command spec ] + } + +in { command = command, pipeline = pipeline, Spec = Spec } diff --git 
a/buildkite/src/Command/FuzzyZkappTest.dhall b/buildkite/src/Command/FuzzyZkappTest.dhall deleted file mode 100644 index e3d7ab3bc73..00000000000 --- a/buildkite/src/Command/FuzzyZkappTest.dhall +++ /dev/null @@ -1,96 +0,0 @@ -let S = ../Lib/SelectFiles.dhall - -let Pipeline = ../Pipeline/Dsl.dhall - -let PipelineMode = ../Pipeline/Mode.dhall - -let PipelineTag = ../Pipeline/Tag.dhall - -let JobSpec = ../Pipeline/JobSpec.dhall - -let Command = ../Command/Base.dhall - -let RunInToolchain = ../Command/RunInToolchain.dhall - -let Docker = ../Command/Docker/Type.dhall - -let Size = ../Command/Size.dhall - -let Profiles = ../Constants/Profiles.dhall - -let Spec = - { Type = - { profile : Profiles.Type - , test_app_path : Text - , timeout : Natural - , individual_test_timeout : Natural - , cmd_target : Size - , job_path : Text - , job_name : Text - , tags : List PipelineTag.Type - , mode : PipelineMode.Type - , additional_dirty_when : List S.Type - } - , default = - { profile = Profiles.Type.Dev - , test_app_path = - "src/lib/transaction_snark/test/zkapp_fuzzy/zkapp_fuzzy.exe" - , timeout = 1200 - , individual_test_timeout = 300 - , cmd_target = Size.Small - , additional_dirty_when = [] : List S.Type - } - } - -let buildTestCmd - : Spec.Type -> Command.Type - = \(spec : Spec.Type) - -> let timeout = Natural/show spec.timeout - - let individual_test_timeout = - Natural/show spec.individual_test_timeout - - let key = "fuzzy-zkapp-unit-test-${Profiles.duneProfile spec.profile}" - - in Command.build - Command.Config::{ - , commands = - RunInToolchain.runInToolchain - [ "DUNE_INSTRUMENT_WITH=bisect_ppx", "COVERALLS_TOKEN" ] - "buildkite/scripts/fuzzy-zkapp-test.sh ${Profiles.duneProfile - spec.profile} ${spec.test_app_path} ${timeout} ${individual_test_timeout} && buildkite/scripts/upload-partial-coverage-data.sh ${key} dev" - , label = "Fuzzy zkapp unit tests" - , key = key - , target = spec.cmd_target - , docker = None Docker.Type - , artifact_paths = [ S.contains "core_dumps/*" ] - , flake_retry_limit = Some 0 - } - -let pipeline - : Spec.Type -> Pipeline.Config.Type - = \(spec : Spec.Type) - -> Pipeline.Config::{ - , spec = - let unitDirtyWhen = - [ S.strictlyStart (S.contains "src/lib") - , S.strictlyStart - ( S.contains - "src/lib/transaction_snark/test/zkapp_fuzzy" - ) - , S.exactly "buildkite/src/Command/FuzzyZkappTest" "dhall" - , S.exactly "buildkite/scripts/fuzzy-zkapp-test" "sh" - ] - # spec.additional_dirty_when - - in JobSpec::{ - , dirtyWhen = unitDirtyWhen - , path = spec.job_path - , name = spec.job_name - , tags = spec.tags - , mode = spec.mode - } - , steps = [ buildTestCmd spec ] - } - -in { pipeline = pipeline, Spec = Spec } diff --git a/buildkite/src/Command/Libp2pHelperBuild.dhall b/buildkite/src/Command/Libp2pHelperBuild.dhall deleted file mode 100644 index c00c51aa07f..00000000000 --- a/buildkite/src/Command/Libp2pHelperBuild.dhall +++ /dev/null @@ -1,41 +0,0 @@ -let Command = ./Base.dhall - -let Size = ./Size.dhall - -let Toolchain = ../Constants/Toolchain.dhall - -let BuildFlags = ../Constants/BuildFlags.dhall - -let Cmd = ../Lib/Cmds.dhall - -let DebianVersions = ../Constants/DebianVersions.dhall - -let commands = - \(debVersion : DebianVersions.DebVersion) - -> [ Cmd.run "chmod -R 777 src/app/libp2p_helper" - , Cmd.run "chmod -R 777 src/libp2p_ipc" - , Cmd.runInDocker - Cmd.Docker::{ - , image = Toolchain.image debVersion - , extraEnv = [ "GO=/usr/lib/go/bin/go" ] - } - "make libp2p_helper" - , Cmd.run - "cp src/app/libp2p_helper/result/bin/libp2p_helper . 
&& buildkite/scripts/buildkite-artifact-helper.sh libp2p_helper" - ] - -let cmdConfig = - \(debVersion : DebianVersions.DebVersion) - -> \(buildFlags : BuildFlags.Type) - -> Command.build - Command.Config::{ - , commands = commands debVersion - , label = - "Build Libp2p helper for ${DebianVersions.capitalName - debVersion} ${BuildFlags.toSuffixUppercase - buildFlags}" - , key = "libp2p-helper${BuildFlags.toLabelSegment buildFlags}" - , target = Size.Multi - } - -in { step = cmdConfig } diff --git a/buildkite/src/Command/MinaArtifact.dhall b/buildkite/src/Command/MinaArtifact.dhall index 5b91eb3fec5..ddc915278da 100644 --- a/buildkite/src/Command/MinaArtifact.dhall +++ b/buildkite/src/Command/MinaArtifact.dhall @@ -16,8 +16,6 @@ let JobSpec = ../Pipeline/JobSpec.dhall let Size = ./Size.dhall -let Libp2p = ./Libp2pHelperBuild.dhall - let DockerImage = ./DockerImage.dhall let DebianVersions = ../Constants/DebianVersions.dhall @@ -257,10 +255,10 @@ let docker_step , deb_repo = DebianRepo.Type.Local , deb_profile = spec.profile , step_key = - "test-suite-${DebianVersions.lowerName - spec.debVersion}${Profiles.toLabelSegment - spec.profile}${BuildFlags.toLabelSegment - spec.buildFlags}--docker-image" + "functional_test_suite-${DebianVersions.lowerName + spec.debVersion}${Profiles.toLabelSegment + spec.profile}${BuildFlags.toLabelSegment + spec.buildFlags}-docker-image" , network = "berkeley" } ] @@ -320,20 +318,14 @@ let onlyDebianPipeline = \(spec : MinaBuildSpec.Type) -> pipelineBuilder spec - [ Libp2p.step spec.debVersion spec.buildFlags - , build_artifacts spec - , publish_to_debian_repo spec - ] + [ build_artifacts spec, publish_to_debian_repo spec ] let pipeline : MinaBuildSpec.Type -> Pipeline.Config.Type = \(spec : MinaBuildSpec.Type) -> pipelineBuilder spec - ( [ Libp2p.step spec.debVersion spec.buildFlags - , build_artifacts spec - , publish_to_debian_repo spec - ] + ( [ build_artifacts spec, publish_to_debian_repo spec ] # docker_commands spec ) diff --git a/buildkite/src/Command/PatchArchiveTest.dhall b/buildkite/src/Command/PatchArchiveTest.dhall new file mode 100644 index 00000000000..e220106c236 --- /dev/null +++ b/buildkite/src/Command/PatchArchiveTest.dhall @@ -0,0 +1,32 @@ +let Artifacts = ../Constants/Artifacts.dhall + +let Command = ./Base.dhall + +let Size = ./Size.dhall + +let Network = ../Constants/Network.dhall + +let RunWithPostgres = ./RunWithPostgres.dhall + +let key = "patch-archive-test" + +in { step = + \(dependsOn : List Command.TaggedKey.Type) + -> Command.build + Command.Config::{ + , commands = + [ RunWithPostgres.runInDockerWithPostgresConn + [ "PATCH_ARCHIVE_TEST_APP=mina-patch-archive-test" + , "NETWORK_DATA_FOLDER=/etc/mina/test/archive/sample_db" + ] + "./src/test/archive/sample_db/archive_db.sql" + Artifacts.Type.FunctionalTestSuite + (None Network.Type) + "./scripts/patch-archive-test.sh && buildkite/scripts/upload-partial-coverage-data.sh ${key}" + ] + , label = "Archive: Patch Archive test" + , key = key + , target = Size.Large + , depends_on = dependsOn + } + } diff --git a/buildkite/src/Command/ReplayerTest.dhall b/buildkite/src/Command/ReplayerTest.dhall index 6a742c7e7fd..1e031edd4e2 100644 --- a/buildkite/src/Command/ReplayerTest.dhall +++ b/buildkite/src/Command/ReplayerTest.dhall @@ -8,6 +8,8 @@ let RunWithPostgres = ./RunWithPostgres.dhall let Network = ../Constants/Network.dhall +let key = "replayer-test" + in { step = \(dependsOn : List Command.TaggedKey.Type) -> Command.build @@ -16,12 +18,12 @@ in { step = [ 
RunWithPostgres.runInDockerWithPostgresConn ([] : List Text) "./src/test/archive/sample_db/archive_db.sql" - Artifacts.Type.Archive + Artifacts.Type.FunctionalTestSuite (None Network.Type) - "./buildkite/scripts/replayer-test.sh" + "./buildkite/scripts/replayer-test.sh && buildkite/scripts/upload-partial-coverage-data.sh ${key}" ] , label = "Archive: Replayer test" - , key = "replayer-test" + , key = key , target = Size.Large , depends_on = dependsOn } diff --git a/buildkite/src/Command/Size.dhall b/buildkite/src/Command/Size.dhall index eda37582dc4..a7cadacc02a 100644 --- a/buildkite/src/Command/Size.dhall +++ b/buildkite/src/Command/Size.dhall @@ -1 +1 @@ -< XLarge | Large | Medium | Small | Integration | QA | Hardfork | Multi > +< XLarge | Large | Medium | Small | Integration | QA | Hardfork | Multi | Perf > diff --git a/buildkite/src/Constants/Benchmarks.dhall b/buildkite/src/Constants/Benchmarks.dhall new file mode 100644 index 00000000000..d303dd42499 --- /dev/null +++ b/buildkite/src/Constants/Benchmarks.dhall @@ -0,0 +1,21 @@ +let Spec = + { Type = { tokenEnvName : Text, bucket : Text, org : Text, host : Text } + , default = + { tokenEnvName = "\\\${INFLUX_TOKEN}" + , bucket = "\\\${INFLUX_BUCKET_NAME}" + , org = "\\\${INFLUX_ORG}" + , host = "\\\${INFLUX_HOST}" + } + } + +let toEnvList = + \(spec : Spec.Type) + -> [ "INFLUX_HOST=${spec.host}" + , "INFLUX_TOKEN=${spec.tokenEnvName}" + , "INFLUX_ORG=${spec.org}" + , "INFLUX_BUCKET_NAME=${spec.bucket}" + ] + +let mainlineBranches = "[develop,compatible,master]" + +in { Type = Spec, toEnvList = toEnvList, mainlineBranches = mainlineBranches } diff --git a/buildkite/src/Constants/ContainerImages.dhall b/buildkite/src/Constants/ContainerImages.dhall index f4916e06773..b42dfd5ecae 100644 --- a/buildkite/src/Constants/ContainerImages.dhall +++ b/buildkite/src/Constants/ContainerImages.dhall @@ -4,16 +4,16 @@ -- NOTE: minaToolchainBookworm is also used for building Ubuntu Jammy packages in CI { toolchainBase = "codaprotocol/ci-toolchain-base:v3" , minaToolchainBullseye = - "gcr.io/o1labs-192920/mina-toolchain@sha256:a1f60d69f3657060d6e7289dc770fd7c36fc5a067853019c2f3f6247cb4b6673" + "gcr.io/o1labs-192920/mina-toolchain@sha256:fee11e64a54fd8f026c4632fed7b7b9835b8262a037cdb156deb61d3d0aac8b2" , minaToolchainBookworm = - "gcr.io/o1labs-192920/mina-toolchain@sha256:a1f60d69f3657060d6e7289dc770fd7c36fc5a067853019c2f3f6247cb4b6673" + "gcr.io/o1labs-192920/mina-toolchain@sha256:fee11e64a54fd8f026c4632fed7b7b9835b8262a037cdb156deb61d3d0aac8b2" , minaToolchain = - "gcr.io/o1labs-192920/mina-toolchain@sha256:a1f60d69f3657060d6e7289dc770fd7c36fc5a067853019c2f3f6247cb4b6673" + "gcr.io/o1labs-192920/mina-toolchain@sha256:fee11e64a54fd8f026c4632fed7b7b9835b8262a037cdb156deb61d3d0aac8b2" , elixirToolchain = "elixir:1.10-alpine" , nodeToolchain = "node:14.13.1-stretch-slim" , ubuntu2004 = "ubuntu:20.04" , postgres = "postgres:12.4-alpine" , xrefcheck = - "serokell/xrefcheck@sha256:8fbb35a909abc353364f1bd3148614a1160ef3c111c0c4ae84e58fdf16019eeb" + "dkhamsing/awesome_bot@sha256:a8adaeb3b3bd5745304743e4d8a6d512127646e420544a6d22d9f58a07f35884" , nixos = "gcr.io/o1labs-192920/nix-unstable:1.0.0" } diff --git a/buildkite/src/Constants/DebianVersions.dhall b/buildkite/src/Constants/DebianVersions.dhall index f3042f5f067..138a0533803 100644 --- a/buildkite/src/Constants/DebianVersions.dhall +++ b/buildkite/src/Constants/DebianVersions.dhall @@ -90,6 +90,8 @@ let minimalDirtyWhen = , S.exactly "buildkite/src/Constants/ContainerImages" "dhall" , S.exactly 
"buildkite/src/Command/HardforkPackageGeneration" "dhall" , S.exactly "buildkite/src/Command/MinaArtifact" "dhall" + , S.exactly "buildkite/src/Command/PatchArchiveTest" "dhall" + , S.exactly "buildkite/src/Command/ReplayerTest" "dhall" , S.strictlyStart (S.contains "buildkite/src/Jobs/Release/MinaArtifact") , S.strictlyStart (S.contains "dockerfiles/stages") , S.exactly "scripts/debian/build" "sh" @@ -100,8 +102,7 @@ let minimalDirtyWhen = , S.exactly "buildkite/scripts/build-hardfork-package" "sh" , S.exactly "buildkite/scripts/check-compatibility" "sh" , S.exactly "buildkite/src/Jobs/Test/RunSnarkProfiler" "dhall" - , S.exactly "buildkite/scripts/run-snark-transaction-profiler" "sh" - , S.exactly "scripts/snark_transaction_profiler" "py" + , S.exactly "buildkite/scripts/bench/snark_transaction_profiler" "sh" , S.exactly "buildkite/scripts/version-linter" "sh" , S.exactly "scripts/version-linter" "py" ] diff --git a/buildkite/src/Jobs/Lint/Dhall.dhall b/buildkite/src/Jobs/Lint/Dhall.dhall index 481da3c8817..8854af5414e 100644 --- a/buildkite/src/Jobs/Lint/Dhall.dhall +++ b/buildkite/src/Jobs/Lint/Dhall.dhall @@ -14,6 +14,15 @@ let Docker = ../../Command/Docker/Type.dhall let Size = ../../Command/Size.dhall +let RunInToolchain = ../../Command/RunInToolchain.dhall + +let dump_pipelines_cmd = + Cmd.runInDocker + Cmd.Docker::{ + , image = (../../Constants/ContainerImages.dhall).toolchainBase + } + "buildkite/scripts/dhall/dump_dhall_to_pipelines.sh buildkite/src/Jobs _pipelines" + in Pipeline.build Pipeline.Config::{ , spec = JobSpec::{ @@ -58,5 +67,53 @@ in Pipeline.build , image = (../../Constants/ContainerImages.dhall).toolchainBase } } + , Command.build + Command.Config::{ + , commands = + [ dump_pipelines_cmd ] + # RunInToolchain.runInToolchainBullseye + ([] : List Text) + "python3 ./buildkite/scripts/dhall/checker.py --root _pipelines deps" + , label = "Dhall: deps" + , key = "check-dhall-deps" + , target = Size.Multi + , docker = None Docker.Type + } + , Command.build + Command.Config::{ + , commands = + [ dump_pipelines_cmd ] + # RunInToolchain.runInToolchainBullseye + ([] : List Text) + "python3 ./buildkite/scripts/dhall/checker.py --root _pipelines dirty-when --repo ." 
+ , label = "Dhall: dirtyWhen" + , key = "check-dhall-dirty" + , target = Size.Multi + , docker = None Docker.Type + } + , Command.build + Command.Config::{ + , commands = + [ dump_pipelines_cmd ] + # RunInToolchain.runInToolchainBullseye + ([] : List Text) + "python3 ./buildkite/scripts/dhall/checker.py --root _pipelines dups" + , label = "Dhall: duplicates" + , key = "check-dhall-dups" + , target = Size.Multi + , docker = None Docker.Type + } + , Command.build + Command.Config::{ + , commands = + [ dump_pipelines_cmd ] + # RunInToolchain.runInToolchainBullseye + ([] : List Text) + "python3 ./buildkite/scripts/dhall/checker.py --root _pipelines names" + , label = "Dhall: job names" + , key = "check-dhall-jobs" + , target = Size.Multi + , docker = None Docker.Type + } ] } diff --git a/buildkite/src/Jobs/Lint/Fast.dhall b/buildkite/src/Jobs/Lint/Fast.dhall index 22274dd793f..9a9f04f2b62 100644 --- a/buildkite/src/Jobs/Lint/Fast.dhall +++ b/buildkite/src/Jobs/Lint/Fast.dhall @@ -45,7 +45,7 @@ in Pipeline.build , label = "Fast lint steps; CODEOWNERs, RFCs, Check Snarky & Proof-Systems submodules, Preprocessor Deps" , key = "lint" - , target = Size.Small + , target = Size.Multi , docker = Some Docker::{ , image = (../../Constants/ContainerImages.dhall).toolchainBase } diff --git a/buildkite/src/Jobs/Lint/HelmChart.dhall b/buildkite/src/Jobs/Lint/HelmChart.dhall index fc3cd762b70..e8e2abef194 100644 --- a/buildkite/src/Jobs/Lint/HelmChart.dhall +++ b/buildkite/src/Jobs/Lint/HelmChart.dhall @@ -39,7 +39,7 @@ in Pipeline.build [ Cmd.run "HELM_LINT=true buildkite/scripts/helm-ci.sh" ] , label = "Helm chart lint steps" , key = "lint-helm-chart" - , target = Size.Small + , target = Size.Multi , docker = None Docker.Type } ] diff --git a/buildkite/src/Jobs/Lint/Merge.dhall b/buildkite/src/Jobs/Lint/Merge.dhall index 57c2ecc0099..a03ae66b395 100644 --- a/buildkite/src/Jobs/Lint/Merge.dhall +++ b/buildkite/src/Jobs/Lint/Merge.dhall @@ -33,7 +33,7 @@ in Pipeline.build [ Cmd.run "buildkite/scripts/merges-cleanly.sh compatible" ] , label = "Check merges cleanly into compatible" , key = "clean-merge-compatible" - , target = Size.Small + , target = Size.Multi , docker = Some Docker::{ , image = (../../Constants/ContainerImages.dhall).toolchainBase } @@ -44,7 +44,7 @@ in Pipeline.build [ Cmd.run "buildkite/scripts/merges-cleanly.sh develop" ] , label = "Check merges cleanly into develop" , key = "clean-merge-develop" - , target = Size.Small + , target = Size.Multi , docker = Some Docker::{ , image = (../../Constants/ContainerImages.dhall).toolchainBase } @@ -55,7 +55,7 @@ in Pipeline.build [ Cmd.run "buildkite/scripts/merges-cleanly.sh master" ] , label = "Check merges cleanly into master" , key = "clean-merge-master" - , target = Size.Small + , target = Size.Multi , docker = Some Docker::{ , image = (../../Constants/ContainerImages.dhall).toolchainBase } diff --git a/buildkite/src/Jobs/Lint/ValidationService.dhall b/buildkite/src/Jobs/Lint/ValidationService.dhall index fcff4b30627..350de107453 100644 --- a/buildkite/src/Jobs/Lint/ValidationService.dhall +++ b/buildkite/src/Jobs/Lint/ValidationService.dhall @@ -75,10 +75,7 @@ in Pipeline.build (S.contains "buildkite/src/Jobs/Lint/ValidationService") in JobSpec::{ - , dirtyWhen = - [ dirtyDhallDir - , S.strictlyStart (S.contains ValidationService.rootPath) - ] + , dirtyWhen = [ dirtyDhallDir ] , path = "Lint" , name = "ValidationService" , tags = diff --git a/buildkite/src/Jobs/Lint/Xrefcheck.dhall b/buildkite/src/Jobs/Lint/Xrefcheck.dhall index 
c8fead2e93d..d586f08c5cf 100644 --- a/buildkite/src/Jobs/Lint/Xrefcheck.dhall +++ b/buildkite/src/Jobs/Lint/Xrefcheck.dhall @@ -12,8 +12,6 @@ let Cmd = ../../Lib/Cmds.dhall let Command = ../../Command/Base.dhall -let Docker = ../../Command/Docker/Type.dhall - let Size = ../../Command/Size.dhall let B/SoftFail = B.definitions/commandStep/properties/soft_fail/Type @@ -23,7 +21,8 @@ in Pipeline.build , spec = JobSpec::{ , dirtyWhen = [ SelectFiles.strictly SelectFiles::{ exts = Some [ "md" ] } - , SelectFiles.strictly (SelectFiles.contains ".xrefcheck.yml") + , SelectFiles.strictlyStart + (SelectFiles.contains "buildkite/src/Jobs/Lint/Xrefcheck.dhall") ] , path = "Lint" , name = "Xrefcheck" @@ -36,15 +35,27 @@ in Pipeline.build , steps = [ Command.build Command.Config::{ - , commands = [] : List Cmd.Type + , commands = + [ Cmd.runInDocker + Cmd.Docker::{ + , image = (../../Constants/ContainerImages.dhall).xrefcheck + } + ( "awesome_bot -allow-dupe " + ++ "--allow-redirect " + ++ "--allow 403,401 " + ++ "--skip-save-results " + ++ "--files " + ++ "`find . -name \"*.md\" " + ++ "! -path \"./src/lib/crypto/kimchi_bindings/*\" " + ++ "! -path \"./src/lib/crypto/proof-systems/*\" " + ++ "! -path \"./src/external/*\" " + ++ "` " + ) + ] , label = "Verifies references in markdown" , key = "xrefcheck" , target = Size.Small , soft_fail = Some (B/SoftFail.Boolean True) - , docker = Some Docker::{ - , image = (../../Constants/ContainerImages.dhall).xrefcheck - , shell = None (List Text) - } } ] } diff --git a/buildkite/src/Jobs/Test/ArchiveNodeUnitTest.dhall b/buildkite/src/Jobs/Test/ArchiveNodeUnitTest.dhall index 2cd73c0090d..b3799a03535 100644 --- a/buildkite/src/Jobs/Test/ArchiveNodeUnitTest.dhall +++ b/buildkite/src/Jobs/Test/ArchiveNodeUnitTest.dhall @@ -24,6 +24,8 @@ let password = "codarules" let db = "archiver" +let port = "5433" + let command_key = "archive-unit-tests" in Pipeline.build @@ -50,14 +52,14 @@ in Pipeline.build [ "POSTGRES_PASSWORD=${password}" , "POSTGRES_USER=${user}" , "POSTGRES_DB=${db}" - , "MINA_TEST_POSTGRES=postgres://${user}:${password}@localhost:5434/${db}" + , "MINA_TEST_POSTGRES=postgres://${user}:${password}@localhost:${port}/${db}" , "GO=/usr/lib/go/bin/go" , "DUNE_INSTRUMENT_WITH=bisect_ppx" , "COVERALLS_TOKEN" ] ( Prelude.Text.concatSep " && " - [ "bash buildkite/scripts/setup-database-for-archive-node.sh ${user} ${password} ${db}" + [ "bash buildkite/scripts/setup-database-for-archive-node.sh ${user} ${password} ${db} ${port}" , WithCargo.withCargo "eval \\\$(opam config env) && dune runtest src/app/archive && buildkite/scripts/upload-partial-coverage-data.sh ${command_key} dev" ] diff --git a/buildkite/src/Jobs/Test/FuzzyZkappTest.dhall b/buildkite/src/Jobs/Test/FuzzyZkappTest.dhall deleted file mode 100644 index e983db0383e..00000000000 --- a/buildkite/src/Jobs/Test/FuzzyZkappTest.dhall +++ /dev/null @@ -1,22 +0,0 @@ -let S = ../../Lib/SelectFiles.dhall - -let Pipeline = ../../Pipeline/Dsl.dhall - -let PipelineMode = ../../Pipeline/Mode.dhall - -let PipelineTag = ../../Pipeline/Tag.dhall - -let Command = ../../Command/FuzzyZkappTest.dhall - -in Pipeline.build - ( Command.pipeline - Command.Spec::{ - , job_path = "Test" - , job_name = "FuzzyZkappTest" - , tags = [ PipelineTag.Type.VeryLong, PipelineTag.Type.Test ] - , mode = PipelineMode.Type.Stable - , additional_dirty_when = - [ S.exactly "buildkite/src/Jobs/Test/FuzzyZkappTest" "dhall" ] - , timeout = 1200 - } - ) diff --git a/buildkite/src/Jobs/Test/PatchArchiveTest.dhall 
b/buildkite/src/Jobs/Test/PatchArchiveTest.dhall new file mode 100644 index 00000000000..a0414c85a4f --- /dev/null +++ b/buildkite/src/Jobs/Test/PatchArchiveTest.dhall @@ -0,0 +1,44 @@ +let S = ../../Lib/SelectFiles.dhall + +let Pipeline = ../../Pipeline/Dsl.dhall + +let PipelineTag = ../../Pipeline/Tag.dhall + +let JobSpec = ../../Pipeline/JobSpec.dhall + +let PatchArchiveTest = ../../Command/PatchArchiveTest.dhall + +let Profiles = ../../Constants/Profiles.dhall + +let Network = ../../Constants/Network.dhall + +let Artifacts = ../../Constants/Artifacts.dhall + +let Dockers = ../../Constants/DockerVersions.dhall + +let dependsOn = + Dockers.dependsOn + Dockers.Type.Bullseye + (None Network.Type) + Profiles.Type.Standard + Artifacts.Type.FunctionalTestSuite + +in Pipeline.build + Pipeline.Config::{ + , spec = JobSpec::{ + , dirtyWhen = + [ S.strictlyStart (S.contains "src") + , S.exactly "scripts/patch-archive-test" "sh" + , S.exactly "buildkite/src/Jobs/Test/PatchArchiveTest" "dhall" + , S.exactly "buildkite/src/Command/PatchArchiveTest" "dhall" + ] + , path = "Test" + , name = "PatchArchiveTest" + , tags = + [ PipelineTag.Type.Long + , PipelineTag.Type.Test + , PipelineTag.Type.Stable + ] + } + , steps = [ PatchArchiveTest.step dependsOn ] + } diff --git a/buildkite/src/Jobs/Test/ReplayerTest.dhall b/buildkite/src/Jobs/Test/ReplayerTest.dhall index 0e3d665e2ce..f9caa4f30a7 100644 --- a/buildkite/src/Jobs/Test/ReplayerTest.dhall +++ b/buildkite/src/Jobs/Test/ReplayerTest.dhall @@ -21,7 +21,7 @@ let dependsOn = Dockers.Type.Bullseye (None Network.Type) Profiles.Type.Standard - Artifacts.Type.Archive + Artifacts.Type.FunctionalTestSuite in Pipeline.build Pipeline.Config::{ diff --git a/buildkite/src/Jobs/Test/RunSnarkProfiler.dhall b/buildkite/src/Jobs/Test/RunSnarkProfiler.dhall index 0523f1e1e74..9bfa98eb8a5 100644 --- a/buildkite/src/Jobs/Test/RunSnarkProfiler.dhall +++ b/buildkite/src/Jobs/Test/RunSnarkProfiler.dhall @@ -32,7 +32,7 @@ let buildTestCmd , commands = RunInToolchain.runInToolchain ([] : List Text) - "buildkite/scripts/run-snark-transaction-profiler.sh" + "buildkite/scripts/bench/snark_transaction_profiler.sh" , label = "Snark Transaction Profiler" , key = "snark-transaction-profiler" , target = cmd_target @@ -48,9 +48,9 @@ in Pipeline.build [ S.strictlyStart (S.contains "src") , S.exactly "buildkite/src/Jobs/Test/RunSnarkProfiler" "dhall" , S.exactly - "buildkite/scripts/run-snark-transaction-profiler" + "buildkite/scripts/bench/snark_transaction_profiler" "sh" - , S.exactly "scripts/snark_transaction_profiler" "py" + , S.strictlyStart (S.contains "scripts/benchmarks") ] in JobSpec::{ diff --git a/buildkite/src/Jobs/Test/TerraformNetworkTest.dhall b/buildkite/src/Jobs/Test/TerraformNetworkTest.dhall index 336f4612cb6..3108316b34b 100644 --- a/buildkite/src/Jobs/Test/TerraformNetworkTest.dhall +++ b/buildkite/src/Jobs/Test/TerraformNetworkTest.dhall @@ -35,8 +35,8 @@ in Pipeline.build Pipeline.Config::{ , spec = let unitDirtyWhen = - [ S.strictlyStart (S.contains "src/automation/terraform") - , S.strictlyStart (S.contains "src/helm") + [ S.strictlyStart (S.contains "automation/terraform") + , S.strictlyStart (S.contains "helm") , S.strictlyStart (S.contains "buildkite/src/Jobs/Test/TerraformNetworkTest") , S.strictlyStart diff --git a/buildkite/src/Jobs/Test/TestnetIntegrationTests.dhall b/buildkite/src/Jobs/Test/TestnetIntegrationTests.dhall index 933a21bae6e..a519b2f4016 100644 --- a/buildkite/src/Jobs/Test/TestnetIntegrationTests.dhall +++ 
b/buildkite/src/Jobs/Test/TestnetIntegrationTests.dhall @@ -38,20 +38,15 @@ in Pipeline.build , S.strictlyStart (S.contains "dockerfiles") , S.strictlyStart (S.contains "buildkite/src/Jobs/Test/TestnetIntegrationTest") - , S.strictlyStart - (S.contains "buildkite/src/Jobs/Command/TestExecutive") + , S.strictlyStart (S.contains "buildkite/src/Command/TestExecutive") , S.strictlyStart (S.contains "automation/terraform/modules/o1-integration") , S.strictlyStart (S.contains "automation/terraform/modules/kubernetes/testnet") , S.strictlyStart - ( S.contains - "automation/buildkite/script/run-test-executive-cloud" - ) + (S.contains "buildkite/scripts/run-test-executive-cloud") , S.strictlyStart - ( S.contains - "automation/buildkite/script/run-test-executive-local" - ) + (S.contains "buildkite/scripts/run-test-executive-local") ] , path = "Test" , name = "TestnetIntegrationTests" diff --git a/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall b/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall index c02eb9c5262..4047d2212fd 100644 --- a/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall +++ b/buildkite/src/Jobs/Test/TestnetIntegrationTestsLong.dhall @@ -38,8 +38,7 @@ in Pipeline.build , S.strictlyStart (S.contains "dockerfiles") , S.strictlyStart (S.contains "buildkite/src/Jobs/Test/TestnetIntegrationTest") - , S.strictlyStart - (S.contains "buildkite/src/Jobs/Command/TestExecutive") + , S.strictlyStart (S.contains "buildkite/src/Command/TestExecutive") , S.strictlyStart (S.contains "automation/terraform/modules/o1-integration") , S.strictlyStart diff --git a/buildkite/src/Jobs/Test/VersionLint.dhall b/buildkite/src/Jobs/Test/VersionLint.dhall index bc47db104ad..1368e8e70bc 100644 --- a/buildkite/src/Jobs/Test/VersionLint.dhall +++ b/buildkite/src/Jobs/Test/VersionLint.dhall @@ -1,5 +1,3 @@ -let Cmd = ../../Lib/Cmds.dhall - let S = ../../Lib/SelectFiles.dhall let B = ../../External/Buildkite.dhall @@ -34,9 +32,9 @@ let buildTestCmd RunInToolchain.runInToolchain ([] : List Text) "buildkite/scripts/dump-mina-type-shapes.sh" - # [ Cmd.run - "gsutil cp \$(git log -n 1 --format=%h --abbrev=7)-type_shape.txt \$MINA_TYPE_SHAPE gs://mina-type-shapes" - ] + # RunInToolchain.runInToolchain + ([] : List Text) + "buildkite/scripts/version-linter-patch-missing-type-shapes.sh ${release_branch}" # RunInToolchain.runInToolchain ([] : List Text) "buildkite/scripts/version-linter.sh ${release_branch}" diff --git a/buildkite/src/Jobs/Test/ZkappMetrics.dhall b/buildkite/src/Jobs/Test/ZkappMetrics.dhall index 69fda58815b..90117a87a34 100644 --- a/buildkite/src/Jobs/Test/ZkappMetrics.dhall +++ b/buildkite/src/Jobs/Test/ZkappMetrics.dhall @@ -8,18 +8,29 @@ let Command = ../../Command/Base.dhall let RunInToolchain = ../../Command/RunInToolchain.dhall +let DebianVersions = ../../Constants/DebianVersions.dhall + +let Profiles = ../../Constants/Profiles.dhall + let Docker = ../../Command/Docker/Type.dhall let Size = ../../Command/Size.dhall let JobSpec = ../../Pipeline/JobSpec.dhall +let dependsOn = + DebianVersions.dependsOn + DebianVersions.DebVersion.Bullseye + Profiles.Type.Standard + in Pipeline.build Pipeline.Config::{ , spec = JobSpec::{ , dirtyWhen = [ S.strictlyStart (S.contains "buildkite/src/Jobs/Test/ZkappMetrics") , S.strictlyStart (S.contains "src") + , S.exactly "buildkite/scripts/bench/zkapp_metrics" "sh" + , S.strictlyStart (S.contains "scripts/benchmarks") ] , path = "Test" , name = "ZkappMetrics" @@ -35,11 +46,12 @@ in Pipeline.build , commands = RunInToolchain.runInToolchain ([] : 
List Text) - "./buildkite/scripts/zkapp_metrics.sh" + "./buildkite/scripts/bench/zkapp_metrics.sh" , label = "Zkapp Metrics" , key = "zkapp-metrics" , target = Size.Medium , docker = None Docker.Type + , depends_on = dependsOn } ] } diff --git a/buildkite/src/Lib/Cmds.dhall b/buildkite/src/Lib/Cmds.dhall index 63d5c3d7950..a64bfc8800c 100644 --- a/buildkite/src/Lib/Cmds.dhall +++ b/buildkite/src/Lib/Cmds.dhall @@ -71,11 +71,11 @@ let module = = if docker.useBash then "/bin/bash" else "/bin/sh" in { line = - "docker run -it --rm --entrypoint ${entrypoint} --init --volume ${sharedDir}:/shared --volume ${outerDir}:/workdir --workdir /workdir${envVars}${ if docker.privileged + "docker run -it --rm --entrypoint ${entrypoint} --init --volume /var/secrets:/var/secrets --volume ${sharedDir}:/shared --volume ${outerDir}:/workdir --workdir /workdir${envVars}${ if docker.privileged - then " --privileged" + then " --privileged" - else ""} ${docker.image} -c '${inner.line}'" + else ""} ${docker.image} -c '${inner.line}'" , readable = Optional/map Text @@ -142,7 +142,7 @@ let tests = let dockerExample = assert : { line = - "docker run -it --rm --entrypoint /bin/bash --init --volume /var/buildkite/shared:/shared --volume \\\$BUILDKITE_BUILD_CHECKOUT_PATH:/workdir --workdir /workdir --env ENV1 --env ENV2 --env TEST foo/bar:tag -c 'echo hello'" + "docker run -it --rm --entrypoint /bin/bash --init --volume /var/secrets:/var/secrets --volume /var/buildkite/shared:/shared --volume \\\$BUILDKITE_BUILD_CHECKOUT_PATH:/workdir --workdir /workdir --env ENV1 --env ENV2 --env TEST foo/bar:tag -c 'echo hello'" , readable = Some "Docker@foo/bar:tag ( echo hello )" } === M.inDocker @@ -154,7 +154,7 @@ let tests = let cacheExample = assert - : "./buildkite/scripts/cache-through.sh data.tar \"docker run -it --rm --entrypoint /bin/bash --init --volume /var/buildkite/shared:/shared --volume \\\$BUILDKITE_BUILD_CHECKOUT_PATH:/workdir --workdir /workdir --env ENV1 --env ENV2 --env TEST foo/bar:tag -c 'echo hello > /tmp/data/foo.txt && tar cvf data.tar /tmp/data'\"" + : "./buildkite/scripts/cache-through.sh data.tar \"docker run -it --rm --entrypoint /bin/bash --init --volume /var/secrets:/var/secrets --volume /var/buildkite/shared:/shared --volume \\\$BUILDKITE_BUILD_CHECKOUT_PATH:/workdir --workdir /workdir --env ENV1 --env ENV2 --env TEST foo/bar:tag -c 'echo hello > /tmp/data/foo.txt && tar cvf data.tar /tmp/data'\"" === M.format ( M.cacheThrough M.Docker::{ diff --git a/dockerfiles/stages/3-toolchain b/dockerfiles/stages/3-toolchain index 3f66574da27..03ae9a6a9a8 100644 --- a/dockerfiles/stages/3-toolchain +++ b/dockerfiles/stages/3-toolchain @@ -8,10 +8,11 @@ ARG deb_codename=focal ARG DOCKER_VERSION=19.03.4 ARG TERRAFORM_VERSION=0.14.11 -ARG DEBS3_VERSION=0.11.6 +ARG DEBS3_VERSION=0.11.7 ARG DHALL_VERSION=1.41.1 ARG DHALL_JSON_VERSION=1.7.10 ARG DHALL_BASH_VERSION=1.0.40 +ARG INFLUXDB_CLI_VERSION=2.7.5 USER root @@ -70,6 +71,14 @@ RUN curl -sLO https://github.com/MinaProtocol/deb-s3/releases/download/${DEBS3_V && gem install deb-s3-${DEBS3_VERSION}.gem \ && rm -f deb-s3-${DEBS3_VERSION}.gem +# --- deb-s3 tool +# Custom version, with lock only on manifest upload +RUN wget https://download.influxdata.com/influxdb/releases/influxdb2-client-${INFLUXDB_CLI_VERSION}-linux-amd64.tar.gz \ + && mkdir -p "influx_dir" && tar xvzf influxdb2-client-${INFLUXDB_CLI_VERSION}-linux-amd64.tar.gz -C influx_dir \ + && sudo cp influx_dir/influx /usr/local/bin/ \ + && rm influxdb2-client-${INFLUXDB_CLI_VERSION}-linux-amd64.tar.gz \ + && rm 
-rf influx_dir
+
 # --- Docker Daemon
 RUN curl -sL https://download.docker.com/linux/static/stable/x86_64/docker-${DOCKER_VERSION}.tgz \
 | tar --extract --gzip --strip-components 1 --directory=/usr/bin --file=-
diff --git a/nix/libp2p_helper.json b/nix/libp2p_helper.json
index 7a0a9b7e34a..783913efe5a 100644
--- a/nix/libp2p_helper.json
+++ b/nix/libp2p_helper.json
@@ -1 +1 @@
-{"go.mod":"d5de7e35a76f5c9ce7d6c98f0da39c763961e77b8c94761b1e89ab4bdfdc2a97","go.sum":"586fd920114d3875ec3e1d739921d77d30ad8e2f297b67781ca41d25a81b65a9","vendorSha256":"sha256-vyKrKi5bqm8Mf2rUOojSY0IXHcuNpcVNvd1Iu1RBxDo="}
\ No newline at end of file
+{"go.mod":"6c45e03ccef1f79541f021cf358fa69bf80cb69b58ae92c776bc09cbb1cc8096","go.sum":"d0f40cfc7b2dc7000cd0a0be051c6a832bdbf880fee88550f2b409690cc18774","vendorSha256":"sha256-x/ZReaHGNsDshohcF4+p9Xj/JTK3gMUyeTgJkaN/eUc="}
\ No newline at end of file
diff --git a/scripts/benchmarks.sh b/scripts/benchmarks.sh
deleted file mode 100755
index b072c9983a4..00000000000
--- a/scripts/benchmarks.sh
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/bin/sh
-
-# runs inline benchmarks
-# requires that app/benchmarks/benchmarks.exe is built
-# run with -help to see available flags
-
-export BENCHMARKS_RUNNER=TRUE
-export X_LIBRARY_INLINING=true
-
-GIT_ROOT="`git rev-parse --show-toplevel`"
-
-BENCHMARK_EXE=$GIT_ROOT/_build/default/src/app/benchmarks/benchmarks.exe
-
-if [ ! -f "$BENCHMARK_EXE" ]; then
-  echo "Please run 'make benchmarks' before running this script";
-  exit 1
-fi
-
-exec $BENCHMARK_EXE "$@" -run-without-cross-library-inlining -suppress-warnings
diff --git a/scripts/benchmarks/.gitignore b/scripts/benchmarks/.gitignore
new file mode 100644
index 00000000000..749ccdafd4f
--- /dev/null
+++ b/scripts/benchmarks/.gitignore
@@ -0,0 +1,4 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
diff --git a/scripts/benchmarks/README.md b/scripts/benchmarks/README.md
new file mode 100644
index 00000000000..afeda98a98d
--- /dev/null
+++ b/scripts/benchmarks/README.md
@@ -0,0 +1,97 @@
+# Benchmarks
+
+A Python app for running all major Mina benchmarks of various types:
+
+- mina-benchmarks
+- snark-profiler
+- heap-usage
+- zkapp-limits
+- ledger-export
+
+It requires all underlying executables to be present on the OS. By default the app
+uses their official names (like mina, mina-heap-usage, etc.).
+
+In order to upload files to InfluxDB, all 4 Influx env vars need to be defined:
+- INFLUX_BUCKET_NAME
+- INFLUX_ORG
+- INFLUX_TOKEN
+- INFLUX_HOST
+
+More details here:
+https://docs.influxdata.com/influxdb/cloud/reference/cli/influx/#credential-precedence
+
+## Installation
+
+The project requires Python 3+.
+
+```commandline
+pip install -r ./scripts/benchmarks/requirements.txt
+```
+
+## Usage
+
+```commandline
+python3 ./scripts/benchmarks run --benchmark mina-base --path _build/default/src/app/benchmarks/benchmarks.exe --influx --branch compatible --format csv --outfile mina_base.csv
+```
+
+## Commands
+
+### ls
+
+Prints all supported benchmarks.
+
+```commandline
+ python3 scripts/benchmarks ls
+```
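+
+For reference, the expected output is one benchmark id per line (the ids come
+from the `BenchmarkType` enum in the sources, so treat this listing as
+illustrative rather than authoritative):
+
+```commandline
+mina-base
+snark
+heap-usage
+zkapp
+ledger-export
+```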
+### run
+
+Runs a benchmark.
+
+INFO: each benchmark can have its own set of additional parameters
+
+example:
+```commandline
+python3 scripts/benchmarks run --benchmark snark --path _build/default/src/app/cli/src/mina.exe --branch compatible --outfile zkap_limits.csv
+```
+
+### parse
+
+Parses the textual output of a benchmark to csv
+
+```commandline
+python3 scripts/benchmarks parse --benchmark mina-base --influx --branch compatible --infile output.out --outfile mina_base.csv
+```
+
+
+### compare
+
+Compares a result against the moving average from InfluxDB
+
+```commandline
+python3 scripts/benchmarks compare --infile vrf_lib_tests_mina_base.csv --yellow-threshold 0.1 --red-threshold 0.2
+```
+
+### upload
+
+Uploads data to InfluxDB
+
+```commandline
+python3 scripts/benchmarks upload --infile mina_base_mina_base.csv
+```
+
+### test
+
+Aggregates all of the above commands, with logic to upload data only if the branch is among the mainline branches
+
+```commandline
+python3 scripts/benchmarks test --benchmark snark --path _build/default/src/app/cli/src/mina.exe --branch compatible --tmpfile zkap_limits.csv
+```
+
+
+## Further work
+
+The application is meant to be run in CI. Currently it exits when a value exceeds
+the moving average. A process still needs to be agreed on for handling the situation
+where an increase in value is expected and the results should nevertheless be
+uploaded to InfluxDB. One proposal is to add an env var that bypasses the comparison,
+plus additional logic that accepts a value which exceeds the moving average but not
+the highest historical one (otherwise the moving average would keep rejecting new
+values until it catches up with the expected increase).
diff --git a/scripts/benchmarks/__main__.py b/scripts/benchmarks/__main__.py
new file mode 100644
index 00000000000..d76c3825a59
--- /dev/null
+++ b/scripts/benchmarks/__main__.py
@@ -0,0 +1,149 @@
+"""
+    Mina benchmark runner
+
+    Capable of running benchmarks, parsing their output to csv, comparing results
+    with historical data stored in InfluxDB and uploading new results.
+
+    Requirements:
+
+    all INFLUX_* env vars need to be defined (INFLUX_HOST,INFLUX_TOKEN,INFLUX_BUCKET_NAME,INFLUX_ORG)
+
+"""
+
+import argparse
+import logging
+from pathlib import Path
+
+from lib import *
+
+parser = argparse.ArgumentParser(description='Executes mina benchmarks')
+subparsers = parser.add_subparsers(dest="cmd")
+run_bench = subparsers.add_parser('run')
+run_bench.add_argument("--outfile", required=True, help="output file")
+run_bench.add_argument("--benchmark", type=BenchmarkType, help="benchmark to run")
+run_bench.add_argument("--influx", action='store_true', help="Required only if --format=csv. Makes the csv compliant with the influx annotated csv format")
+run_bench.add_argument("--format", type=Format, help="output file format [text,csv]", default=Format.text)
+run_bench.add_argument("--path", help="override path to benchmark")
+run_bench.add_argument("--branch", default="test", help="Required only if --format=csv. Adds the branch name to the csv file")
+run_bench.add_argument("--genesis-ledger-path", default="./genesis_ledgers/devnet.json", help="Applicable only for the ledger-export benchmark. Location of the genesis config file")
+run_bench.add_argument("--k", default=1)
+run_bench.add_argument("--max-num-updates", default=4, type=int)
+run_bench.add_argument("--min-num-updates", default=2, type=int)
+
+parse_bench = subparsers.add_parser('parse', help="parse textual benchmark output to csv")
+parse_bench.add_argument("--benchmark", type=BenchmarkType, help="benchmark to run")
+parse_bench.add_argument("--infile", help="input file")
+parse_bench.add_argument("--influx", action='store_true', help="ensure the output file is compliant with the influx schema")
+parse_bench.add_argument("--branch", help="adds an additional column in the csv with the branch from which the benchmarks were built")
+parse_bench.add_argument("--outfile", help="output file")
+
+compare_bench = subparsers.add_parser('compare', help="compare current data with historical data downloaded from influx db")
+compare_bench.add_argument("--benchmark", type=BenchmarkType, help="benchmark to run")
+compare_bench.add_argument("--infile", help="input file")
+compare_bench.add_argument("--yellow-threshold", help="fraction by which the current measurement may exceed the moving average before the app triggers a warning",
+                           type=float,
+                           choices=[Range(0.0, 1.0)],
+                           default=0.1)
+compare_bench.add_argument("--red-threshold", help="fraction by which the current measurement may exceed the moving average before the app exits with an error",
+                           type=float,
+                           choices=[Range(0.0, 1.0)],
+                           default=0.2)
+
+upload_bench = subparsers.add_parser('upload')
+upload_bench.add_argument("--infile")
+
+test_bench = subparsers.add_parser('test', help="Performs the entire cycle of operations, from run to upload")
+test_bench.add_argument("--benchmark", type=BenchmarkType, help="benchmark to test")
+test_bench.add_argument("--tmpfile", help="temporary location of the result file")
+test_bench.add_argument("--path")
+test_bench.add_argument("--yellow-threshold",
+                        help="fraction by which the current measurement may exceed the moving average before the app triggers a warning",
+                        type=float,
+                        choices=[Range(0.0, 1.0)],
+                        default=0.1)
+test_bench.add_argument("--red-threshold",
+                        help="fraction by which the current measurement may exceed the moving average before the app exits with an error",
+                        type=float,
+                        choices=[Range(0.0, 1.0)],
+                        default=0.2)
+test_bench.add_argument("--branch", help="branch that was used in tests")
+test_bench.add_argument("--genesis-ledger-path", default="./genesis_ledgers/devnet.json", help="Applicable only for the ledger-export benchmark. Location of the genesis config file")
+test_bench.add_argument('-m', '--mainline-branches', action='append', help='Defines a mainline branch. If the value of the \'--branch\' parameter is among the mainline branches then the result will be uploaded')
+test_bench.add_argument("--k", default=1)
+test_bench.add_argument("--max-num-updates", default=4, type=int)
+test_bench.add_argument("--min-num-updates", default=2, type=int)
+
+
+ls_bench = subparsers.add_parser('ls')
+
+args = parser.parse_args()
+
+logging.basicConfig(level=logging.DEBUG)
+
+default_mainline_branches = ["develop", "compatible", "master"]
+
+
+def select_benchmark(kind):
+    if kind == BenchmarkType.mina_base:
+        return MinaBaseBenchmark()
+    elif kind == BenchmarkType.zkapp:
+        return ZkappLimitsBenchmark()
+    elif kind == BenchmarkType.heap_usage:
+        return HeapUsageBenchmark()
+    elif kind == BenchmarkType.snark:
+        return SnarkBenchmark(args.k, args.max_num_updates, args.min_num_updates)
+    elif kind == BenchmarkType.ledger_export:
+        if args.genesis_ledger_path is None:
+            print(
+                "--genesis-ledger-path needs to be provided when running the ledger export benchmark"
+            )
+            exit(1)
+        return LedgerExportBenchmark(args.genesis_ledger_path)
+
+if args.cmd == "ls":
+    benches = [str(b) for b in BenchmarkType]
+    print("\n".join(benches))
+    exit(0)
+
+if args.benchmark is None:
+    print("benchmark not selected")
+    exit(1)
+
+bench = select_benchmark(args.benchmark)
+
+if args.cmd == "run":
+    output = bench.run(path=args.path)
+    if args.format == "text":
+        with open(args.outfile, 'w') as file:
+            file.write(output)
+    else:
+        files = ",".join(
+            bench.parse(output, args.outfile, args.influx, args.branch))
+        print(f"produced files: {files}")
+
+if args.cmd == "parse":
+    files = bench.parse(Path(args.infile).read_text(), args.outfile, args.influx, args.branch)
+    print(f'Parsed files: \n{",".join(files)}')
+
+
+if args.cmd == "compare":
+    bench.compare(args.infile, args.yellow_threshold, args.red_threshold)
+
+if args.cmd == "upload":
+    bench.upload(args.infile)
+
+if args.cmd == "test":
+    output = bench.run(path=args.path)
+    files = bench.parse(output,
+                        args.tmpfile,
+                        influxdb=True,
+                        branch=args.branch)
+
+    [
+        bench.compare(file, args.yellow_threshold, args.red_threshold)
+        for file in files
+    ]
+
+    mainline_branches = default_mainline_branches if args.mainline_branches is None else args.mainline_branches
+
+    if args.branch in mainline_branches:
+        for file in files:
+            bench.upload(file)
\ No newline at end of file
diff --git a/scripts/benchmarks/lib/__init__.py b/scripts/benchmarks/lib/__init__.py
new file mode 100644
index 00000000000..d4e612aaf46
--- /dev/null
+++ b/scripts/benchmarks/lib/__init__.py
@@ -0,0 +1,3 @@
+from .influx import *
+from .bench import *
+from .utils import *
diff --git a/scripts/benchmarks/lib/bench.py b/scripts/benchmarks/lib/bench.py
new file mode 100644
index 00000000000..0034ade912f
--- /dev/null
+++ b/scripts/benchmarks/lib/bench.py
@@ -0,0 +1,615 @@
+import re
+from abc import ABC
+
+import parse
+from pathlib import Path
+import io
+import os
+from enum import Enum
+import logging
+from lib.utils import isclose, assert_cmd
+from lib.influx import *
+
+import csv
+import abc
+
+logger = logging.getLogger(__name__)
+
+class Benchmark(abc.ABC):
+    """
+    Abstract class which aggregates all necessary operations
+    (run, parse) which are then implemented by children.
+    Moreover, it provides concrete implementations for general, common
+    operations such as upload.
+    """
+
+    def __init__(self, kind):
+        self.kind = kind
+        self.influx_client = Influx()
+
+    def headers_to_influx(self, headers):
+        """
+        Converts headers to influx db annotated csv datatype headers. Details:
+        https://docs.influxdata.com/influxdb/cloud/reference/syntax/annotated-csv/extended/
+        """
+        return "#datatype " + ",".join(
+            [header.influx_kind for header in headers])
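+
+    # For illustration: with the zkapp headers defined later in this file
+    # (one measurement, five fields, two tags), headers_to_influx produces:
+    #
+    #   #datatype measurement,double,double,double,double,double,tag,tag
+    #
+    # which is the extended annotated-csv header that influx expects.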
+
+    @abc.abstractmethod
+    def default_path(self):
+        """
+        Abstract method to get the default path to the app
+        """
+        pass
+
+    @abc.abstractmethod
+    def name_header(self):
+        """
+        Abstract method for getting the header object for the measurement name
+        """
+        pass
+
+    @abc.abstractmethod
+    def branch_header(self):
+        """
+        Abstract method for getting the header object for the branch name
+        """
+        pass
+
+    def headers_to_name(self, headers):
+        """
+        Gets the names of headers
+        """
+        return list(map(lambda x: x.name, headers))
+
+    @abc.abstractmethod
+    def headers(self):
+        """
+        Returns all csv headers
+        """
+        pass
+
+    @abc.abstractmethod
+    def fields(self):
+        """
+        Returns the subset of headers used as influx fields:
+        https://docs.influxdata.com/influxdb/cloud/reference/syntax/annotated-csv/extended/#field
+        """
+        pass
+
+    @abc.abstractmethod
+    def run(self, path):
+        """
+        Runs the benchmark
+        """
+        pass
+
+    @abc.abstractmethod
+    def parse(self, content, output_filename, influxdb, branch):
+        """
+        Parses benchmark output to csv
+        """
+        pass
+
+    def compare(self, result_file, yellow_threshold, red_threshold):
+        """
+        Compares actual measurements against thresholds (yellow, red)
+
+        Constraints on the result file:
+        - comma as delimiter
+        - implements the influx csv format:
+          https://docs.influxdata.com/influxdb/cloud/reference/syntax/annotated-csv/extended/
+
+        It fetches the moving average from influx db and adds grace margins
+        (yellow, red) to tolerate measurement skew.
+        """
+        with open(result_file, newline='') as csvfile:
+            reader = csv.reader(csvfile, delimiter=',')
+            for i in range(2):
+                next(reader)
+            for row in reader:
+                for field in self.fields():
+                    value = float(row[field.pos])
+                    name = row[self.name_header().pos]
+                    branch = row[self.branch_header().pos]
+                    result = self.influx_client.query_moving_average(
+                        name, branch, str(field), self.branch_header())
+
+                    if not any(result):
+                        logger.warning(
+                            f"Skipping comparison for {name} as there is no historical data available yet"
+                        )
+                    else:
+                        average = float(result[-1].records[-1]["_value"])
+
+                        current_red_threshold = average * red_threshold
+                        current_yellow_threshold = average * yellow_threshold
+
+                        logger.debug(
+                            f"calculated thresholds: [red={current_red_threshold},yellow={current_yellow_threshold}]"
+                        )
+
+                        if value > average + current_red_threshold:
+                            logger.error(
+                                f"{name} measurement greatly exceeds the moving average ({value} against the allowed {average + current_red_threshold}). failing the build"
+                            )
+                            exit(1)
+                        elif value > average + current_yellow_threshold:
+                            logger.warning(
+                                f"WARNING: {name} measurement exceeds the expected time ({value} against the allowed {average + current_yellow_threshold})"
+                            )
+                        else:
+                            logger.info(
+                                f"comparison successful for {name}. {value} is below the thresholds [yellow={average + current_yellow_threshold},red={average + current_red_threshold}]"
+                            )
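+
+    # Worked example for compare() above (illustrative numbers): with a moving
+    # average of 10.0, yellow_threshold=0.1 and red_threshold=0.2, a value up
+    # to 11.0 passes, a value in (11.0, 12.0] logs a warning, and a value
+    # above 12.0 fails the build.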
+
+    def upload(self, file):
+        self.influx_client.upload_csv(file)
+
+
+class BenchmarkType(Enum):
+
+    mina_base = 'mina-base'
+    snark = 'snark'
+    heap_usage = 'heap-usage'
+    zkapp = 'zkapp'
+    ledger_export = 'ledger-export'
+
+    def __str__(self):
+        return self.value
+
+
+class JaneStreetBenchmark(Benchmark, ABC):
+    """
+    Abstract class for native OCaml benchmarks, which all share the same
+    output format
+    """
+    name = MeasurementColumn("Name", 0)
+    time_per_runs = FieldColumn("Time/Run", 1, "us")
+    cycles_per_runs = FieldColumn("Cycls/Run", 2, "kc")
+    minor_words_per_runs = FieldColumn("mWd/Run", 3, "w")
+    major_words_per_runs = FieldColumn("mjWd/Run", 4, "w")
+    promotions_per_runs = FieldColumn("Prom/Run", 5, "w")
+    branch = TagColumn("gitbranch", 6)
+
+    def __init__(self, kind):
+        Benchmark.__init__(self, kind)
+
+    def headers(self):
+        return [
+            MinaBaseBenchmark.name, MinaBaseBenchmark.time_per_runs,
+            MinaBaseBenchmark.cycles_per_runs,
+            MinaBaseBenchmark.minor_words_per_runs,
+            MinaBaseBenchmark.major_words_per_runs,
+            MinaBaseBenchmark.promotions_per_runs, MinaBaseBenchmark.branch
+        ]
+
+    def fields(self):
+        return [
+            MinaBaseBenchmark.time_per_runs, MinaBaseBenchmark.cycles_per_runs,
+            MinaBaseBenchmark.minor_words_per_runs,
+            MinaBaseBenchmark.major_words_per_runs,
+            MinaBaseBenchmark.promotions_per_runs
+        ]
+
+    def name_header(self):
+        return self.name
+
+    def branch_header(self):
+        return self.branch
+
+    def export_to_csv(self, lines, filename, influxdb, branch):
+        """
+        Exports native OCaml benchmark output to influxdb annotated csv.
+        Jane Street benchmarks share a common tabular layout, schematically:
+
+        │ Name              │ Time/Run │ Cycls/Run │ mWd/Run │ mjWd/Run │ Prom/Run │
+        │ [bench] some_name │  12.34us │   56.78kc │     90w │      12w │       3w │
+        """
+        with open(filename, 'w') as csvfile:
+
+            csvwriter = csv.writer(csvfile)
+
+            if influxdb:
+                csvfile.write(self.headers_to_influx(self.headers()) + "\n")
+
+            for line in lines:
+                if line.startswith('│'):
+
+                    rows = list(map(lambda x: x.strip(), line.split('│')))
+                    rows = list(filter(lambda x: x, rows))
+
+                    if rows[0].startswith(MinaBaseBenchmark.name.name):
+                        # header row: append units to the column names
+                        rows[1] += " " + MinaBaseBenchmark.time_per_runs.format_unit()
+                        rows[2] += " " + MinaBaseBenchmark.cycles_per_runs.format_unit()
+                        rows[3] += " " + MinaBaseBenchmark.minor_words_per_runs.format_unit()
+                        rows[4] += " " + MinaBaseBenchmark.major_words_per_runs.format_unit()
+                        rows[5] += " " + MinaBaseBenchmark.promotions_per_runs.format_unit()
+                        rows.append("gitbranch")
+
+                    else:
+                        # remove [.*] from name
+                        rows[0] = re.sub('\[.*?\]', '', rows[0]).strip()
+                        time = rows[1]
+                        # remove units from values, normalizing time to us
+                        if not time.endswith("us"):
+                            if time.endswith("ns"):
+                                time = float(time[:-2]) / 1_000
+                                rows[1] = time
+                            else:
+                                raise Exception(
+                                    "Time can be expressed only in us or ns")
+                        else:
+                            # us
+                            rows[1] = time[:-2]
+                        # kc
+                        rows[2] = rows[2][:-2]
+                        # w
+                        rows[3] = rows[3][:-1]
+                        # w
+                        rows[4] = rows[4][:-1]
+                        # w
+                        rows[5] = rows[5][:-1]
+                        rows.append(branch)
+
+                    csvwriter.writerow(rows[:])
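+
+    # Illustrative transformation (hypothetical numbers): the table row
+    #   │ [bench] apply_txn │ 12.2us │ 1.2kc │ 5w │ 1w │ 0w │
+    # becomes the csv row
+    #   apply_txn,12.2,1.2,5,1,0,<branch>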
+
+    def parse(self, content, output_filename, influxdb, branch):
+        """
+        Parses the output of a standard Jane Street benchmark to csv. The
+        output may contain one or more sections, each introduced by a line
+        like: Running inline tests in library "<name>", followed by the
+        tabular layout described in export_to_csv.
+
+        It can produce standard csv or annotated influx db csv, one file per
+        section.
+        """
+        buf = io.StringIO(content)
+        lines = buf.readlines()
+
+        starts = []
+        ends = []
+        files = []
+        for i, e in enumerate(lines):
+            if "Running" in e:
+                starts.append(i)
+
+        if not any(starts):
+            self.export_to_csv(lines, output_filename, influxdb, branch)
+        else:
+            for start in starts[1:]:
+                ends.append(start)
+
+            ends.append(len(lines) - 1)
+
+            for start, end in zip(starts, ends):
+                name = parse.parse('Running inline tests in library "{}"',
+                                   lines[start].strip())[0]
+                file = f'{name}_{output_filename}'
+                logger.info(f"exporting {file}..")
+                self.export_to_csv(lines[start:end], f'{file}', influxdb,
+                                   branch)
+                files.append(file)
+
+        return files
+
+
+class MinaBaseBenchmark(JaneStreetBenchmark):
+    """
+    Concrete implementation of JaneStreetBenchmark for mina-base benchmarks
+    """
+
+    def __init__(self):
+        JaneStreetBenchmark.__init__(self, BenchmarkType.mina_base)
+
+    def run(self, path=None):
+        path = self.default_path() if path is None else path
+        cmd = [
+            path, "time", "cycles", "alloc", "-clear-columns", "-all-values",
+            "-width", "1000", "-run-without-cross-library-inlining",
+            "-suppress-warnings"
+        ]
+        envs = os.environ.copy()
+        envs["BENCHMARKS_RUNNER"] = "TRUE"
+        envs["X_LIBRARY_INLINING"] = "true"
+
+        return assert_cmd(cmd, envs)
+
+    def default_path(self):
+        return "mina-benchmarks"
+
+
+class LedgerExportBenchmark(JaneStreetBenchmark):
+    """
+    Concrete implementation of JaneStreetBenchmark for the ledger export
+    benchmark. Ledger export also requires a genesis ledger config.
+    """
+
+    def __init__(self, genesis_ledger_path):
+        JaneStreetBenchmark.__init__(self, BenchmarkType.ledger_export)
+        self.genesis_ledger_path = genesis_ledger_path
+
+    def run(self, path=None):
+        path = self.default_path() if path is None else path
+        cmd = [
+            path, "time", "cycles", "alloc", "-clear-columns", "-all-values",
+            "-width", "1000"
+        ]
+        envs = os.environ.copy()
+        envs["RUNTIME_CONFIG"] = self.genesis_ledger_path
+
+        return assert_cmd(cmd, envs)
+
+    def default_path(self):
+        return "mina-ledger-export-benchmark"
+
+
+class ZkappLimitsBenchmark(Benchmark):
+    """
+    The ZkappLimits benchmark has its own output format which we need to handle separately.
+    It is similar to:
+
+    Proofs updates=0 Signed/None updates=0 Pairs of Signed/None updates=1: Total account updates: 2 Cost: 10.080000
+    Proofs updates=0 Signed/None updates=0 Pairs of Signed/None updates=2: Total account updates: 4 Cost: 20.160000
+    Proofs updates=0 Signed/None updates=0 Pairs of Signed/None updates=3: Total account updates: 6 Cost: 30.240000
+    Proofs updates=0 Signed/None updates=0 Pairs of Signed/None updates=4: Total account updates: 8 Cost: 40.320000
+    Proofs updates=0 Signed/None updates=0 Pairs of Signed/None updates=5: Total account updates: 10 Cost: 50.400000
+    Proofs updates=0 Signed/None updates=0 Pairs of Signed/None updates=6: Total account updates: 12 Cost: 60.480000
+    Proofs updates=0 Signed/None updates=1 Pairs of Signed/None updates=0: Total account updates: 1 Cost: 9.140000
+    Proofs updates=0 Signed/None updates=1 Pairs of Signed/None updates=1: Total account updates: 3 Cost: 19.220000
+    """
+
+    name = MeasurementColumn("Name", 0)
+    proofs_updates = FieldColumn("proofs updates", 1, "")
+    signed_updates = FieldColumn("signed updates", 2, "")
+    pairs_of_signed = FieldColumn("pairs of signed", 3, "")
+    total_account_updates = FieldColumn("total account updates", 4, "")
+    cost = FieldColumn("cost", 5, "")
+    category = TagColumn("category", 6)
+    branch = TagColumn("gitbranch", 7)
+
+    def __init__(self):
+        Benchmark.__init__(self, BenchmarkType.zkapp)
+
+    def default_path(self):
+        return "mina-zkapp-limits"
+
+    def fields(self):
+        return [
+            self.proofs_updates, self.pairs_of_signed,
+            self.total_account_updates, self.cost
+        ]
+
+    def name_header(self):
+        return self.name
+
+    def branch_header(self):
+        return self.branch
+
+    def headers(self):
+        return [
+            ZkappLimitsBenchmark.name, ZkappLimitsBenchmark.proofs_updates,
+            ZkappLimitsBenchmark.signed_updates,
+            ZkappLimitsBenchmark.pairs_of_signed,
+            ZkappLimitsBenchmark.total_account_updates,
+            ZkappLimitsBenchmark.cost, ZkappLimitsBenchmark.category,
+            ZkappLimitsBenchmark.branch
+        ]
+
+    def parse(self, content, output_filename, influxdb, branch):
+
+        buf = io.StringIO(content)
+        lines = buf.readlines()
+
+        stats = [list(map(lambda x: x.name, self.headers()))]
+
+        for line in lines:
+            if line == '':
+                continue
+
+            syntax = r"Proofs updates=(?P<proofs_updates>\d+) Signed/None updates=(?P<signed_updates>\d+) Pairs of Signed/None updates=(?P<pairs_of_signed_updates>\d+): Total account updates: (?P<total_account_updates>\d+) Cost: (?P<cost>[0-9]*[.]?[0-9]+)"
+
+            match = re.match(syntax, line)
+
+            if match:
+                proofs_updates = int(match.group("proofs_updates"))
+                signed_updates = int(match.group("signed_updates"))
+                pairs_of_signed_updates = int(
+                    match.group("pairs_of_signed_updates"))
+                total_account_updates = int(
+                    match.group("total_account_updates"))
+                cost = float(match.group(ZkappLimitsBenchmark.cost.name))
+                name = f"P{proofs_updates}S{signed_updates}PS{pairs_of_signed_updates}TA{total_account_updates}"
+                tag = "zkapp"
+                stats.append((name, proofs_updates, signed_updates,
+                              pairs_of_signed_updates, total_account_updates,
+                              cost, tag, branch))
+
+        with open(output_filename, 'w') as csvfile:
+            if influxdb:
+                csvfile.write(
+                    self.headers_to_influx(self.headers()) + "\n")
+            csvwriter = csv.writer(csvfile)
+            csvwriter.writerows(stats)
+
+        return [output_filename]
+
+    def run(self, path=None):
+        path = self.default_path() if path is None else path
+        return assert_cmd([path])
+
+
+class SnarkBenchmark(Benchmark):
+
+    name = MeasurementColumn("name", 0)
+    proofs_updates = FieldColumn("proofs updates", 1, "")
+    nonproofs_pairs = FieldColumn("non-proof pairs", 2, "")
FieldColumn("non-proof singles", 3, "") + verification_time = FieldColumn("verification time", 4, "[s]") + proving_time = FieldColumn("value", 5, "[s]") + category = TagColumn("category", 6) + branch = TagColumn("gitbranch", 7) + + def name_header(self): + return self.name + + def branch_header(self): + return self.branch + + def __init__(self, k , max_num_updates, min_num_updates ): + Benchmark.__init__(self, BenchmarkType.snark) + self.k = k + self.max_num_updates = max_num_updates + self.min_num_updates = min_num_updates + + def headers(self): + return [ + SnarkBenchmark.name, SnarkBenchmark.proofs_updates, + SnarkBenchmark.nonproofs_pairs, SnarkBenchmark.nonproofs_singles, + SnarkBenchmark.verification_time, SnarkBenchmark.proving_time, + SnarkBenchmark.category, SnarkBenchmark.branch + ] + + def fields(self): + return [ + SnarkBenchmark.proofs_updates, SnarkBenchmark.nonproofs_pairs, + SnarkBenchmark.nonproofs_singles, SnarkBenchmark.verification_time, SnarkBenchmark.proving_time + ] + + def parse(self, content, output_filename, influxdb, branch): + buf = io.StringIO(content) + lines = buf.readlines() + rows = [] + category = "snark" + rows.append(list(map(lambda x: x.name, self.headers()))) + + for line in lines: + if line.startswith("|"): + if "--" in line: + continue + elif line.startswith("| No.|"): + continue + else: + cols = line.split("|") + cols = list(map(lambda x: x.strip(), cols)) + cols = list(filter(lambda x: x, cols)) + + #| No.| Proof updates| Non-proof pairs| Non-proof singles| Mempool verification time (sec)| Transaction proving time (sec)|Permutation| + proof_update = cols[1] + non_proof_pairs = cols[2] + non_proof_singles = cols[3] + verification_time = cols[4] + proving_time = cols[5] + name = cols[6] + + rows.append((name,proof_update,non_proof_pairs,non_proof_singles,verification_time,proving_time, + category,branch)) + + with open(output_filename, 'w') as csvfile: + if influxdb: + csvfile.write(self.headers_to_influx(self.headers()) + "\n") + + csvwriter = csv.writer(csvfile) + csvwriter.writerows(rows) + + return [ output_filename ] + + def default_path(self): + return "mina" + + def run(self, path=None): + path = self.default_path() if path is None else path + return assert_cmd([ + path, "transaction-snark-profiler", "--zkapps", "--k", + str(self.k), "--max-num-updates", + str(self.max_num_updates), "--min-num-updates", + str(self.min_num_updates) + ]) + + +class HeapUsageBenchmark(Benchmark): + """ + Heap Usage benchmark is another example of non standard benchmark. + Output is similar like: + + Data of type Zkapp_command.t uses 52268 heap words = 418144 bytes + Data of type Pickles.Side_loaded.Proof.t uses 3467 heap words = 27736 bytes + Data of type Mina_base.Side_loaded_verification_key.t uses 897 heap words = 7176 bytes + Data of type Dummy Pickles.Side_loaded.Proof.t uses 2672 heap words = 21376 bytes + Data of type Dummy Mina_base.Side_loaded_verification_key.t uses 99 heap words = 792 bytes + ... 
+ """ + + name = MeasurementColumn("Name", 0) + heap_words = FieldColumn("heap words", 1, "") + bytes = FieldColumn("bytes", 2, "") + category = TagColumn("category", 3) + branch = TagColumn("gitbranch", 4) + + def __init__(self): + Benchmark.__init__(self, BenchmarkType.heap_usage) + + def name_header(self): + return self.name + + def branch_header(self): + return self.branch + + def headers(self): + return [ + HeapUsageBenchmark.name, HeapUsageBenchmark.heap_words, + HeapUsageBenchmark.bytes, HeapUsageBenchmark.category, + HeapUsageBenchmark.branch + ] + + def fields(self): + return [ + HeapUsageBenchmark.heap_words, + HeapUsageBenchmark.bytes + ] + + def parse(self, content, output_filename, influxdb, branch): + buf = io.StringIO(content) + lines = buf.readlines() + rows = [] + rows.append(self.headers_to_name(self.headers())) + + for i, line in enumerate(lines): + if line.startswith("Data of type"): + sanitized_line = line.replace(" ", "").strip() + row = list( + parse.parse("Dataoftype{}uses{}heapwords={}bytes", + sanitized_line)) + row.extend(("heap_usage", branch)) + rows.append(row) + + with open(output_filename, 'w') as csvfile: + if influxdb: + csvfile.write(self.headers_to_influx(self.headers()) + "\n") + csvwriter = csv.writer(csvfile) + csvwriter.writerows(rows) + return [output_filename] + + def default_path(self): + return "mina-heap-usage" + + def run(self, path=None): + path = self.default_path() if path is None else path + return assert_cmd([path]) diff --git a/scripts/benchmarks/lib/influx.py b/scripts/benchmarks/lib/influx.py new file mode 100644 index 00000000000..a45c1c0a39c --- /dev/null +++ b/scripts/benchmarks/lib/influx.py @@ -0,0 +1,165 @@ +import logging +import os +import subprocess +import time +from pathlib import Path + +import influxdb_client + +logger = logging.getLogger(__name__) + + +class HeaderColumn: + """ + Specialized column class for influx upload. + It accepts influx_kind [string,double,tag..] and pos which helps find it in csv when parsing + """ + + def __init__(self, name, influx_kind, pos): + self.name = name + self.influx_kind = influx_kind + self.pos = pos + + +class MeasurementColumn(HeaderColumn): + """ + Column header which represents influx measurement header + """ + + def __init__(self, name, pos): + HeaderColumn.__init__(self, name, influx_kind="measurement", pos=pos) + + +class FieldColumn(HeaderColumn): + """ + Column header which represents influx field header. + It has additional unit field which can be formatted as part of name + Currently field is always a double (there was no need so far for different type) + """ + + def __init__(self, name, pos, unit=None): + HeaderColumn.__init__(self, name, influx_kind="double", pos=pos) + self.unit = unit + + def __str__(self): + if self.unit: + return f"{self.name} [{self.unit}]" + else: + return f"{self.name}" + + def format_unit(self): + return f"[{self.unit}]" + + +class TagColumn(HeaderColumn): + """ + Specialized header for inglux tag + """ + + def __init__(self, name, pos): + HeaderColumn.__init__(self, name, influx_kind="tag", pos=pos) + + +class Influx: + """ + Influx helper which wraps influx cli and python api + It requires INFLUX_* env vars to be set + and raises RuntimeException if they are not defined + """ + + host = "INFLUX_HOST" + token = "INFLUX_TOKEN" + org = "INFLUX_ORG" + bucket = "INFLUX_BUCKET_NAME" + + @staticmethod + def check_envs(): + """ + In order to talk with influx db we need to have some env vars defined. 
+
+    def client(self):
+        Influx.check_envs()
+        return influxdb_client.InfluxDBClient(
+            url=os.environ[Influx.host],
+            token=os.environ[Influx.token],
+            org=os.environ[Influx.org],
+            bucket=os.environ[Influx.bucket])
+
+    def __init__(self, moving_average_size=10):
+        self.moving_average_size = moving_average_size
+
+    def __get_moving_average_query(self, name, branch, field, branch_header):
+        """
+        Constructs a moving average query from influx for comparison purposes
+
+        The moving average size is configured in the class constructor
+        """
+
+        bucket = os.environ[Influx.bucket]
+        return f"from(bucket: \"{bucket}\") \
+            |> range(start: -{self.moving_average_size}d) \
+            |> filter (fn: (r) => (r[\"{branch_header.name}\"] == \"{branch}\" ) \
+                and r._measurement == \"{name}\" \
+                and r._field == \"{field}\" ) \
+            |> keep(columns: [\"_value\"]) \
+            |> movingAverage(n:{self.moving_average_size}) "
+
+    def query_moving_average(self, name, branch, field, branch_header):
+        """
+        Retrieves the moving average from influx db for a particular
+        branch and field
+        """
+
+        query = self.__get_moving_average_query(name, branch, field,
+                                                branch_header)
+        logger.debug(f"running influx query: {query}")
+        query_api = self.client().query_api()
+        return query_api.query(query)
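+
+    # For illustration, with moving_average_size=10, bucket "mina-benchmarks",
+    # branch "compatible" and field "Time/Run [us]" (example values only),
+    # the rendered query looks roughly like:
+    #
+    #   from(bucket: "mina-benchmarks")
+    #     |> range(start: -10d)
+    #     |> filter (fn: (r) => (r["gitbranch"] == "compatible" )
+    #         and r._measurement == "<benchmark name>"
+    #         and r._field == "Time/Run [us]" )
+    #     |> keep(columns: ["_value"])
+    #     |> movingAverage(n:10)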
" + f"see more at https://docs.influxdata.com/influxdb/cloud/reference/syntax/annotated-csv/" + ) + + process = subprocess.Popen([ + "influx", "write", "--http-debug", "--format=csv", f"--file={file}" + ], + stderr=subprocess.PIPE) + + timeout = time.time() + 60 # 1 minute + while True: + line = process.stderr.readline() + if b"HTTP/2.0 204 No Content" in line or time.time() > timeout: + process.kill() + break + + logger.info(f"{file} uploaded to influx db") diff --git a/scripts/benchmarks/lib/utils.py b/scripts/benchmarks/lib/utils.py new file mode 100644 index 00000000000..382dce31d04 --- /dev/null +++ b/scripts/benchmarks/lib/utils.py @@ -0,0 +1,54 @@ +import subprocess +import logging +from enum import Enum + +logger = logging.getLogger(__name__) + + +def isclose(a, b, rel_tol=1e-09, abs_tol=0.0): + return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol) + + +def assert_cmd(cmd, envs=None): + logger.debug(f"running command {cmd}") + result = subprocess.run(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=envs) + + if result.returncode != 0: + err = result.stderr.decode("UTF-8") + logger.error( + f"{cmd} resulted in errorcode {result.returncode} with message {err}" + ) + raise RuntimeError(f"cmd failed: {cmd} with stderr: {err}") + + output = result.stdout.decode("UTF-8") + logger.debug(f"command output: {output}") + return output + +class Range(object): + + def __init__(self, start, end): + self.start = start + self.end = end + + def __eq__(self, other): + return self.start <= other <= self.end + + def __contains__(self, item): + return self.__eq__(item) + + def __iter__(self): + yield self + + def __str__(self): + return '[{0},{1}]'.format(self.start, self.end) + + +class Format(Enum): + csv = 'csv' + text = 'text' + + def __str__(self): + return self.value diff --git a/scripts/benchmarks/requirements.txt b/scripts/benchmarks/requirements.txt new file mode 100644 index 00000000000..22ccbdce44d --- /dev/null +++ b/scripts/benchmarks/requirements.txt @@ -0,0 +1,2 @@ +influxdb_client==1.46.0 +parse==1.20.1 diff --git a/scripts/benchmarks/result_comparator.py b/scripts/benchmarks/result_comparator.py new file mode 100644 index 00000000000..783b325af2d --- /dev/null +++ b/scripts/benchmarks/result_comparator.py @@ -0,0 +1,27 @@ +import csv +import argparse +import subprocess + + +parser = argparse.ArgumentParser(description='Calculate actual benchmark values against influx db') +parser.add_argument('--infile', + help='input csv file with actual benchmark') +parser.add_argument('--red-threshold', + help='value above which app return exit 1') +parser.add_argument('--yellow-threshold', + help='value above which app return warning', + ) +args = parser.parse_args() + +with open(args.infile, newline='') as csvfile: + rows = list(csv.reader(csvfile)) + + headers_rows = rows[1] + name_pos = [ i for i,x in enumerate(headers_rows) if x == "Name"][0] + branch_pos = [ i for i,x in enumerate(headers_rows) if x == "gitbranch"][0] + + for items in rows[2:]: + name = items[name_pos] + branch = items[branch_pos] + output = subprocess.run(["influx", "query", f'from(bucket: "mina-benchmarks") |> range(start: -10d) |> filter (fn: (r) => (r._tag["gitbranch"] == "{branch}" ) and r._measurement == "{name}") |> keep(columns: ["_value"]) |> movingAverage(n:1) ']).stdout.read() + print(output) \ No newline at end of file diff --git a/scripts/benchmarks/result_parser.py b/scripts/benchmarks/result_parser.py new file mode 100755 index 00000000000..d5d0e5e0f09 --- /dev/null +++ 
b/scripts/benchmarks/result_parser.py
@@ -0,0 +1,218 @@
+import csv
+import argparse
+import re
+from parse import *
+from pathlib import Path
+
+from enum import Enum
+
+
+class Benchmark(Enum):
+    tabular = 'tabular'
+    snark = 'snark'
+    heap_usage = 'heap-usage'
+    zkapp = 'zkapp'
+
+    def __str__(self):
+        return self.value
+
+
+def export_to_csv(lines, filename, influxdb, branch):
+    with open(filename, 'w') as csvfile:
+
+        csvwriter = csv.writer(csvfile)
+
+        if influxdb:
+            csvfile.write("#datatype measurement,double,double,double,double,tag\n")
+
+        for line in lines:
+            if line.startswith('│'):
+
+                rows = list(map(lambda x: x.strip(), line.split('│')))
+                rows = list(filter(lambda x: x, rows))
+
+                if rows[0].startswith("Name"):
+                    rows[1] += " [us]"
+                    rows[2] += " [kc]"
+                    rows[3] += " [w]"
+                    rows[4] += " [w]"
+                    rows.append("gitbranch")
+
+                else:
+                    # remove [.*] from name
+                    rows[0] = re.sub('\[.*?\]', '', rows[0]).strip()
+                    time = rows[1]
+                    # remove units from values, normalizing time to us
+                    if not time.endswith("us"):
+                        if time.endswith("ns"):
+                            time = float(time[:-2]) / 1_000
+                            rows[1] = time
+                        else:
+                            raise Exception("Time can be expressed only in us or ns")
+                    else:
+                        # us
+                        rows[1] = time[:-2]
+                    # kc
+                    rows[2] = rows[2][:-2]
+                    # w
+                    rows[3] = rows[3][:-1]
+                    # w
+                    rows[4] = rows[4][:-1]
+
+                    rows.append(branch)
+
+                csvwriter.writerow(rows[:])
+
+
+def parse_zkapp_limits(input_filename, output_filename, influxdb, branch):
+    with open(input_filename, 'r', encoding='UTF-8') as file:
+        lines = file.readlines()
+        stats = []
+        header = ["name", "proofs updates", "signed updates", "pairs of signed", "total account updates", "cost", "category", "gitbranch"]
+        stats.append(header)
+
+        for line in lines:
+            if line == '':
+                continue
+
+            syntax = r"Proofs updates=(?P<proofs_updates>\d+) Signed/None updates=(?P<signed_updates>\d+) Pairs of Signed/None updates=(?P<pairs_of_signed_updates>\d+): Total account updates: (?P<total_account_updates>\d+) Cost: (?P<cost>[0-9]*[.]?[0-9]+)"
+
+            match = re.match(syntax, line)
+
+            if match:
+                proofs_updates = int(match.group('proofs_updates'))
+                signed_updates = int(match.group('signed_updates'))
+                pairs_of_signed_updates = int(match.group('pairs_of_signed_updates'))
+                total_account_updates = int(match.group('total_account_updates'))
+                cost = float(match.group('cost'))
+                name = f"P{proofs_updates}S{signed_updates}PS{pairs_of_signed_updates}TA{total_account_updates}"
+                tag = "zkapp"
+                stats.append((name, proofs_updates, signed_updates, pairs_of_signed_updates, total_account_updates, cost, tag, branch))
+
+        with open(output_filename, 'w') as csvfile:
+            if influxdb:
+                csvfile.write("#datatype measurement,double,double,double,double,double,tag,tag\n")
+            csvwriter = csv.writer(csvfile)
+            csvwriter.writerows(stats)
+
+
+def parse_snark_format(input_filename, output_filename, influxdb, branch):
+    with open(input_filename, 'r', encoding='UTF-8') as file:
+        lines = file.readlines()
+        stats = []
+        zkapps = []
+
+        header = ["measurement", "proof updates", "nonproofs", "value", "tag", "gitbranch"]
+        stats.append(header)
+
+        for line in lines:
+            if line == '':
+                continue
+
+            syntax = 'Generated zkapp transactions with (?P\d+) updates and (?P\d+) proof updates in (?P