From 4a09f7d2b43ec4e0d35c1c83dc42c3d8c7b980e8 Mon Sep 17 00:00:00 2001 From: Roger Barker Date: Mon, 6 Jan 2025 16:03:42 -0600 Subject: [PATCH] chore: Cherry pick fixes for update of default branch to main (#17200) (#17235) Signed-off-by: Roger Barker Signed-off-by: Jendrik Johannes Co-authored-by: Jendrik Johannes Co-authored-by: Andrew Brandt Co-authored-by: Thomas Moran <152873392+thomas-swirlds-labs@users.noreply.github.com> --- .../workflows/flow-artifact-determinism.yaml | 4 +- .../node-flow-build-application.yaml | 5 +- .../node-flow-deploy-release-artifact.yaml | 43 +++++- ... => node-zxcron-main-fsts-regression.yaml} | 4 +- .github/workflows/node-zxf-snyk-monitor.yaml | 4 +- ... platform-zxcron-main-jrs-regression.yaml} | 4 +- .github/workflows/zxc-jrs-regression.yaml | 6 +- .../workflows/zxcron-extended-test-suite.yaml | 14 +- .../zxcron-promote-build-candidate.yaml | 8 +- .../workflows/zxf-collect-workflow-logs.yaml | 72 ++++++++++ .../zxf-prepare-extended-test-suite.yaml | 8 +- docs/branch-naming-conventions.md | 9 +- ...continuous-integration-testing-overview.md | 8 +- docs/glossary.md | 4 +- docs/maintainers-guide.md | 29 ++-- hedera-node/docs/design/app/workflows.md | 14 +- .../cancun-fork-support.md | 25 ++-- .../contract-accounts-nonces.md | 21 +-- hedera-node/docs/dev/JRS-GettingStarted.md | 136 +++++++++--------- platform-sdk/README.md | 2 +- platform-sdk/description.txt | 2 +- .../docs/core/address-book-management.md | 10 +- platform-sdk/docs/core/wiring-diagram.svg | 2 +- platform-sdk/docs/proposals/README.md | 2 +- .../docs/proposals/metric-labels/README.md | 93 ++++++------ .../model/diagram/HyperlinkBuilder.java | 10 +- .../cli/JrsTestReaderReportCommand.java | 6 +- 27 files changed, 325 insertions(+), 220 deletions(-) rename .github/workflows/{node-zxcron-develop-fsts-regression.yaml => node-zxcron-main-fsts-regression.yaml} (94%) rename .github/workflows/{platform-zxcron-develop-jrs-regression.yaml => 
platform-zxcron-main-jrs-regression.yaml} (94%) create mode 100644 .github/workflows/zxf-collect-workflow-logs.yaml diff --git a/.github/workflows/flow-artifact-determinism.yaml b/.github/workflows/flow-artifact-determinism.yaml index f638b1c915bd..ee54787bc084 100644 --- a/.github/workflows/flow-artifact-determinism.yaml +++ b/.github/workflows/flow-artifact-determinism.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2023-2024 Hedera Hashgraph, LLC +# Copyright (C) 2023-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ on: default: "21.0.4" push: branches: - - develop + - main - 'release/**' tags: - 'v*.*.*' diff --git a/.github/workflows/node-flow-build-application.yaml b/.github/workflows/node-flow-build-application.yaml index 8630c2aa0358..c7b27610460d 100644 --- a/.github/workflows/node-flow-build-application.yaml +++ b/.github/workflows/node-flow-build-application.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2022-2024 Hedera Hashgraph, LLC +# Copyright (C) 2022-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -50,7 +50,6 @@ on: default: "temurin" push: branches: - - develop - main - 'release/*' @@ -124,7 +123,7 @@ jobs: with: workflow: .github/workflows/node-flow-deploy-release-artifact.yaml repo: hashgraph/hedera-services # ensure we are executing in the hashgraph org - ref: develop # ensure we are always using the workflow definition from the develop branch + ref: main # ensure we are always using the workflow definition from the main branch token: ${{ secrets.GH_ACCESS_TOKEN }} inputs: '{ "ref": "${{ steps.workflow-inputs.outputs.input-ref }}", diff --git a/.github/workflows/node-flow-deploy-release-artifact.yaml b/.github/workflows/node-flow-deploy-release-artifact.yaml index 836f754e54f0..3d001b7c4cc0 100644 --- a/.github/workflows/node-flow-deploy-release-artifact.yaml +++ b/.github/workflows/node-flow-deploy-release-artifact.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2022-2024 Hedera Hashgraph, LLC +# Copyright (C) 2022-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -41,6 +41,11 @@ defaults: run: shell: bash +permissions: + id-token: write + contents: read + actions: read + jobs: prepare-tag-release: name: Prepare Release [Tag] @@ -150,28 +155,52 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: '0' - ref: develop + ref: main token: ${{ secrets.GH_ACCESS_TOKEN }} - - name: Trigger ZXF Prepare Extended Test Suite + - name: Check Prep XTS Job State + id: check-xts-job if: ${{ needs.release-branch.result == 'success' }} + env: + GH_TOKEN: ${{ github.token }} + run: | + JOB_ENABLED="true" + JOB_STATE=$(gh workflow list --all --json name,state | jq -r '.[]|select(.name=="ZXF: Prepare Extended Test Suite")|.state') + [[ "${JOB_STATE}" == "disabled_manually" ]] && JOB_ENABLED="false" + echo "enabled=${JOB_ENABLED}" >> $GITHUB_OUTPUT + + - name: Trigger ZXF Prepare Extended Test Suite + if: ${{ needs.release-branch.result == 'success' && steps.check-xts-job.outputs.enabled == 'true' }} uses: step-security/workflow-dispatch@4d1049025980f72b1327cbfdeecb07fe7a20f577 # v1.2.4 with: workflow: .github/workflows/zxf-prepare-extended-test-suite.yaml repo: hashgraph/hedera-services # ensure we are executing in the hashgraph org - ref: develop # ensure we are always using the workflow definition from the develop branch + ref: main # ensure we are always using the workflow definition from the main branch token: ${{ secrets.GH_ACCESS_TOKEN }} inputs: '{ "ref": "${{ inputs.ref }}" }' - - name: Trigger ZXF Deploy Integration + - name: Check Integration Job State + id: check-integration-job if: ${{ needs.release-branch.result == 'success' && + (inputs.author != '' && inputs.msg != '' && inputs.sha != '') && + !cancelled() }} + env: + GH_TOKEN: ${{ github.token }} + run: | + JOB_ENABLED="true" + JOB_STATE=$(gh workflow list --all --json name,state | jq -r '.[]|select(.name=="ZXF: [Node] Deploy Integration Network Release")|.state') + [[ "${JOB_STATE}" == "disabled_manually" ]] && 
JOB_ENABLED="false" + echo "enabled=${JOB_ENABLED}" >> $GITHUB_OUTPUT + + - name: Trigger ZXF Deploy Integration + if: ${{ needs.release-branch.result == 'success' && steps.check-integration-job.outputs.enabled == 'true' && (inputs.author != '' && inputs.msg != '' && inputs.sha != '') && !cancelled() }} uses: step-security/workflow-dispatch@4d1049025980f72b1327cbfdeecb07fe7a20f577 # v1.2.4 with: workflow: .github/workflows/node-zxf-deploy-integration.yaml repo: hashgraph/hedera-services # ensure we are executing in the hashgraph org - ref: develop # ensure we are always using the workflow definition from the develop branch + ref: main # ensure we are always using the workflow definition from the main branch token: ${{ secrets.GH_ACCESS_TOKEN }} inputs: '{ "ref": "${{ inputs.ref }}", @@ -195,7 +224,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: token: ${{ secrets.GH_ACCESS_TOKEN }} - ref: develop + ref: main fetch-depth: '0' - name: Checkout Hedera Protobufs Code diff --git a/.github/workflows/node-zxcron-develop-fsts-regression.yaml b/.github/workflows/node-zxcron-main-fsts-regression.yaml similarity index 94% rename from .github/workflows/node-zxcron-develop-fsts-regression.yaml rename to .github/workflows/node-zxcron-main-fsts-regression.yaml index df2b6a3d7352..d896c4b688b4 100644 --- a/.github/workflows/node-zxcron-develop-fsts-regression.yaml +++ b/.github/workflows/node-zxcron-main-fsts-regression.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2022-2024 Hedera Hashgraph, LLC +# Copyright (C) 2022-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # limitations under the License. 
## -name: "ZXCron: [Node] Develop JRS Tests" +name: "ZXCron: [Node] Main JRS Tests" on: workflow_dispatch: diff --git a/.github/workflows/node-zxf-snyk-monitor.yaml b/.github/workflows/node-zxf-snyk-monitor.yaml index 17ec6b73d936..c639d335d038 100644 --- a/.github/workflows/node-zxf-snyk-monitor.yaml +++ b/.github/workflows/node-zxf-snyk-monitor.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2023-2024 Hedera Hashgraph, LLC +# Copyright (C) 2023-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -19,7 +19,7 @@ name: "ZXF: Snyk Monitor" on: push: branches: - - develop + - main workflow_dispatch: permissions: diff --git a/.github/workflows/platform-zxcron-develop-jrs-regression.yaml b/.github/workflows/platform-zxcron-main-jrs-regression.yaml similarity index 94% rename from .github/workflows/platform-zxcron-develop-jrs-regression.yaml rename to .github/workflows/platform-zxcron-main-jrs-regression.yaml index 6cfc2218a2c0..f6682c657b3d 100644 --- a/.github/workflows/platform-zxcron-develop-jrs-regression.yaml +++ b/.github/workflows/platform-zxcron-main-jrs-regression.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2022-2024 Hedera Hashgraph, LLC +# Copyright (C) 2022-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -14,7 +14,7 @@ # limitations under the License. 
## -name: "ZXCron: [Platform] Develop JRS Regression" +name: "ZXCron: [Platform] Main JRS Regression" on: schedule: - cron: '30 5 * * *' diff --git a/.github/workflows/zxc-jrs-regression.yaml b/.github/workflows/zxc-jrs-regression.yaml index 7251b7b95320..adc3b54fd837 100644 --- a/.github/workflows/zxc-jrs-regression.yaml +++ b/.github/workflows/zxc-jrs-regression.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2022-2024 Hedera Hashgraph, LLC +# Copyright (C) 2022-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -490,8 +490,8 @@ jobs: fi if [[ -n "${HEDERA_TESTS_ENABLED}" && "${HEDERA_TESTS_ENABLED}" = true ]]; then - # Override for the develop branch - if [[ "${SLACK_BRANCH}" != "develop" ]]; then + # Override for the main branch + if [[ "${SLACK_BRANCH}" != "main" ]]; then SLACK_SUMMARY="hedera-gcp-${SLACK_BRANCH}-summary" SLACK_RESULTS="hedera-gcp-${SLACK_BRANCH}-regression" else diff --git a/.github/workflows/zxcron-extended-test-suite.yaml b/.github/workflows/zxcron-extended-test-suite.yaml index 960c88ee32cb..8fc4d522b6f9 100644 --- a/.github/workflows/zxcron-extended-test-suite.yaml +++ b/.github/workflows/zxcron-extended-test-suite.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2023-2024 Hedera Hashgraph, LLC +# Copyright (C) 2023-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -57,12 +57,12 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: '0' - ref: develop + ref: main token: ${{ secrets.GH_ACCESS_TOKEN }} # Check if the xts-candidate tag exists # the command git branch --contains xts-tag-commit | grep --quiet - # will return an exit code of 1 if the tagged commit is not found on the develop + # will return an exit code of 1 if the tagged commit is not found on the main # branch. 
- name: Check for tags id: check-tags-exist @@ -93,9 +93,9 @@ jobs: gh run cancel ${{ github.run_id }} fi - # Check if the tag exists on the develop branch + # Check if the tag exists on the main branch set +e - git branch --contains "${XTS_COMMIT}" | grep --quiet develop >/dev/null 2>&1 + git branch --contains "${XTS_COMMIT}" | grep --quiet main >/dev/null 2>&1 BRANCH_ON_DEVELOP="${?}" set -e @@ -103,7 +103,7 @@ jobs: AUTHOR_NAME=$(git log -1 --format='%an' "${XTS_COMMIT}") AUTHOR_EMAIL=$(git log -1 --format='%ae' "${XTS_COMMIT}") - # If the tag exists on the Develop Branch set the output variables as appropriate + # If the tag exists on the Main Branch set the output variables as appropriate # Otherwise cancel out if [[ "${BRANCH_ON_DEVELOP}" -eq 0 ]]; then echo "xts-tag-exists=true" >> $GITHUB_OUTPUT @@ -259,7 +259,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: '0' - ref: develop + ref: main token: ${{ secrets.GH_ACCESS_TOKEN }} - name: Collect run logs in a log file diff --git a/.github/workflows/zxcron-promote-build-candidate.yaml b/.github/workflows/zxcron-promote-build-candidate.yaml index 75534890691e..2169d7cd2616 100644 --- a/.github/workflows/zxcron-promote-build-candidate.yaml +++ b/.github/workflows/zxcron-promote-build-candidate.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2023-2024 Hedera Hashgraph, LLC +# Copyright (C) 2023-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
@@ -52,7 +52,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: '0' - ref: develop + ref: main token: ${{ secrets.GH_ACCESS_TOKEN }} - name: Find Build Candidates @@ -74,8 +74,8 @@ jobs: gh run cancel ${{ github.run_id }} fi - # Verify the commit is on develop and continue - if git branch --contains "${CANDIDATE_COMMIT}" | grep --quiet develop >/dev/null 2>&1; then + # Verify the commit is on main and continue + if git branch --contains "${CANDIDATE_COMMIT}" | grep --quiet main >/dev/null 2>&1; then git push --delete origin $(git tag --list "${TAG_PATTERN}") git tag --delete $(git tag --list "${TAG_PATTERN}") echo "build-candidate-exists=true" >> "${GITHUB_OUTPUT}" diff --git a/.github/workflows/zxf-collect-workflow-logs.yaml b/.github/workflows/zxf-collect-workflow-logs.yaml new file mode 100644 index 000000000000..c221e0381f5d --- /dev/null +++ b/.github/workflows/zxf-collect-workflow-logs.yaml @@ -0,0 +1,72 @@ +## +# Copyright (C) 2023-2025 Hedera Hashgraph, LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+##
+
+name: "ZXF: Collect Workflow Run Logs"
+on:
+  workflow_dispatch:
+    inputs:
+      workflow_id:
+        description: "ID of the workflow run to collect logs from (example: #123456789)"
+        required: true
+
+defaults:
+  run:
+    shell: bash
+
+jobs:
+  collect-logs:
+    name: Collect Workflow Run Logs
+    runs-on: network-node-linux-medium
+    steps:
+      - name: Harden Runner
+        uses: step-security/harden-runner@0080882f6c36860b6ba35c610c98ce87d4e2f26f # v2.10.2
+        with:
+          egress-policy: audit
+
+      - name: Get run ID from run number
+        env:
+          GH_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
+        id: run_id
+        run: |
+          RUN_ID=$(gh api -H "Accept: application/vnd.github+json" \
+            /repos/hashgraph/hedera-services/actions/workflows/zxcron-extended-test-suite.yaml/runs \
+            --jq '.workflow_runs[] | select(.run_number == ${{ inputs.workflow_id }}) | .id')
+          echo "value=$RUN_ID" >> "${GITHUB_OUTPUT}"
+
+      - name: Checkout Code
+        uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2
+        with:
+          fetch-depth: '0'
+          ref: main
+          token: ${{ secrets.GH_ACCESS_TOKEN }}
+
+      - name: Get run logs
+        env:
+          GH_TOKEN: ${{ secrets.GH_ACCESS_TOKEN }}
+        run: gh run view ${{ steps.run_id.outputs.value }} --log >> workflow-run.log
+
+      - name: Upload log as artifact
+        id: upload-log
+        uses: actions/upload-artifact@6f51ac03b9356f520e9adb1b1b7802705f340c2b # v4.5.0
+        with:
+          path: workflow-run.log
+
+      - name: Share log information
+        run: |
+          echo "### Logs Collected for Workflow:" >> $GITHUB_STEP_SUMMARY
+          echo "Workflow run ID: ${{ inputs.workflow_id }}" >> $GITHUB_STEP_SUMMARY
+          echo "Workflow URL: https://github.com/hashgraph/hedera-services/actions/runs/${{ steps.run_id.outputs.value }}" >> $GITHUB_STEP_SUMMARY
+          echo "Log file download URL: ${{ steps.upload-log.outputs.artifact-url }}" >> $GITHUB_STEP_SUMMARY
diff --git a/.github/workflows/zxf-prepare-extended-test-suite.yaml b/.github/workflows/zxf-prepare-extended-test-suite.yaml
index 732bbe154693..716470795e83 100644
--- 
a/.github/workflows/zxf-prepare-extended-test-suite.yaml +++ b/.github/workflows/zxf-prepare-extended-test-suite.yaml @@ -1,5 +1,5 @@ ## -# Copyright (C) 2023-2024 Hedera Hashgraph, LLC +# Copyright (C) 2023-2025 Hedera Hashgraph, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -50,7 +50,7 @@ jobs: uses: actions/checkout@11bd71901bbe5b1630ceea73d27597364c9af683 # v4.2.2 with: fetch-depth: '0' - ref: 'develop' + ref: 'main' token: ${{ secrets.GH_ACCESS_TOKEN }} - name: Validate Input Ref @@ -58,10 +58,10 @@ jobs: env: COMMIT_ID: ${{ inputs.ref }} run: | - if git merge-base --is-ancestor "${COMMIT_ID}" develop >/dev/null 2>&1; then + if git merge-base --is-ancestor "${COMMIT_ID}" main >/dev/null 2>&1; then echo "commit_on_dev=true" >> $GITHUB_OUTPUT else - echo "::error title=Branch Alignment::The provided commit (${COMMIT_ID}) is not present on the develop branch." + echo "::error title=Branch Alignment::The provided commit (${COMMIT_ID}) is not present on the main branch." exit 1 fi diff --git a/docs/branch-naming-conventions.md b/docs/branch-naming-conventions.md index c77ccb33ac26..841bc329a4d8 100644 --- a/docs/branch-naming-conventions.md +++ b/docs/branch-naming-conventions.md @@ -20,18 +20,19 @@ naming standard. ### Permanent & Default Branches -The repository will contain two permanent branches as per the GitFlow Workflow `main` and `develop` -branches. +The repository will contain one permanent branch, `main`, per the Hashgraph Continuous Integration +Test and Release workflow **Default Branches** -The default branch for a repository will be `develop` as per the branching workflow. +The default branch for a repository will be `main` as per the Hashgraph Continuous Integration +Test and Release workflow. 
### Branch to Issue Relationship Aside from the permanent or release branches, no short-lived (feature, hotfix, bugfix) branch should be created without being associated to an issue number. No short-lived branch should be merged into -`develop` or `main` without an associated and approved pull request. +`main` without an associated and approved pull request. ### Feature Branch Naming diff --git a/docs/continuous-integration-testing-overview.md b/docs/continuous-integration-testing-overview.md index 036a82d983de..6afc43f6e52c 100644 --- a/docs/continuous-integration-testing-overview.md +++ b/docs/continuous-integration-testing-overview.md @@ -20,11 +20,11 @@ two major components of CITR: MATS and XTS. MATS is the Minimal Acceptable Test Suite; this suite of tests is run against every pull request (PR) that is opened in the `hashgraph/hedera-services` repository. -XTS is the eXtended Test Suite; this suite of tests is run against the latest commit on the develop branch every three +XTS is the eXtended Test Suite; this suite of tests is run against the latest commit on the main branch every three hours (provided there is a new commit to run against). MATS tests are inclusive of a series of unit tests and performance tests that must be executed against a PR branch prior -to merging into develop. The MATS tests are intended to complete within a 30-minute time window to provide developers +to merging into main. The MATS tests are intended to complete within a 30-minute time window to provide developers with valuable insight of the impact of new code on the default branch. XTS tests are run against the default branch once every three hours. These cover test cases that are unable to complete @@ -38,14 +38,14 @@ There is an additional workflow: `ZXF: Extended Test Suite - Dry Run` which is a `hashgraph/hedera-services` repository. 
The XTS Dry-Run workflow runs a provided commit on any branch through the same XTS tests that would be run against the -latest on develop every three hours. This workflow is run with a manual trigger and will execute in parallel to any +latest on main every three hours. This workflow is run with a manual trigger and will execute in parallel to any other actions ongoing in the `hashgraph/hedera-services` repository. A developer can manually trigger a run using the parameters in the web UI: ```text Use Workflow From - Branch: develop # this should always be `develop` + Branch: main # this should always be `main` The commit sha to check out The branch name, for JRS Panel output diff --git a/docs/glossary.md b/docs/glossary.md index 929f3be8a125..b83eb5cc03c6 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -193,9 +193,9 @@ This file contains the address book to use when starting from genesis. **Congestion Pricing**: A mechanism designed to manage network congestion by dynamically adjusting transaction fees based on network demand. The primary goal of congestion pricing is to discourage excessive network usage during peak times. Refer to -[Congestion Pricing](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/docs/fees/automated-congestion-pricing.md) +[Congestion Pricing](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/docs/fees/automated-congestion-pricing.md) and -[Fees](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/docs/design/app/fees.md). +[Fees](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/docs/design/app/fees.md). ## Consensus Time diff --git a/docs/maintainers-guide.md b/docs/maintainers-guide.md index 755796a0bfa3..34502f5d0319 100644 --- a/docs/maintainers-guide.md +++ b/docs/maintainers-guide.md @@ -58,12 +58,10 @@ pattern for the development life cycle. 
![gitflow-branching-model](./assets/gitflow-branching-model.png) -Note especially the roles of the `main` and `develop` branches: +Note especially the roles of the `main` branch: -- `develop` is the default branch, the target of active development, and should at all times +- `main` is the default branch, the target of active development, and should at all times should be a viable candidate for the next release. -- `main` is a tightly-controlled branch that release engineering uses for final tags deployed to - production. ### Creating issues on GitHub @@ -92,35 +90,34 @@ with 0.30 milestone on it. ![labels-on-issue](./assets/labels-on-issue.png) -### Release Engineering Responsibilities +### DevOps-CI Responsibilities -The release engineering team will handle the following: +The DevOps-CI team will handle the following: -- Create a release branch from `develop` branch at the end of first sprint in the release cycle -- Will merge the release branch for current deploying release into `main` - Will provide automated release processes and coordinate release schedules - Will handle production releases +- Note: no release branch will be created ### User Stories #### As a developer, I would like to create a branch to work on the feature for the upcoming release -As per the development model, every developer should create a feature branch from `develop` branch +As per the development model, every developer should create a feature branch from `main` branch for working on a change targeted for the current release. The created branch should follow [naming conventions](branch-naming-conventions.md). -The `develop` branch should be up-to-date with all the features going into the next release. +The `main` branch should be up-to-date with all the features going into the next release. 
#### As a developer, I would like to create a branch to work on the feature NOT targeted for upcoming release -As per the development model, every developer should create a feature branch to work from `develop` +As per the development model, every developer should create a feature branch to work from `main` branch. The created branch should follow [naming conventions](branch-naming-conventions.md). But, -the feature branch should NOT be merged into `develop` until the decision is made if the feature is +the feature branch should NOT be merged into `main` until the decision is made if the feature is going into upcoming release. #### As a developer, I would like to merge my feature branch or bug fix for the upcoming release -Open a pull request (PR) from the feature branch to `develop` branch and add +Open a pull request (PR) from the feature branch to `main` branch and add `hashgraph/hedera-services-team` as reviewers. Also add the following labels on the PR : @@ -134,7 +131,7 @@ PR should be merged after an approving review and all the checks are passed. NOTE: 1. Any feature that is not going into the upcoming release should stay in the feature branch and - should not be merged to `develop`. + should not be merged to `main`. 2. Please use either the Gradle command line `./gradlew qualityGate` or the [Google Java Format IntelliJ Plugin](https://github.com/google/google-java-format#intellij-android-studio-and-other-jetbrains-ides) to format your code to avoid failing checks in CI pipeline. @@ -146,14 +143,14 @@ Once the release branch is created, only bugfixes or hotfixes should be merged i To do that, create a `hotfix` from the `release` branch. The created branch should follow [naming conventions](branch-naming-conventions.md). Once the fix is in the branch, open a PR to the release branch. Once the fix is merged into `release` branch, it should be cherry-picked into the -`develop` branch. +`main` branch. 
#### As a developer, I would like to merge a bugfix/hotfix from the production code To fix a bug from one of the previous releases(production code), create a hotfix branch from `main`. Once the fix is in the branch, create a PR targeting to `main`. Once bugfix is merged into `main`and it should be cherry-picked back into the current `release` branch(if the release branch is still -open), and also into `develop`. +open). ### DCO Sign Off diff --git a/hedera-node/docs/design/app/workflows.md b/hedera-node/docs/design/app/workflows.md index bab72146b5d2..b1532d5b6439 100644 --- a/hedera-node/docs/design/app/workflows.md +++ b/hedera-node/docs/design/app/workflows.md @@ -118,18 +118,18 @@ All the objects used while handling the transaction belong to one of the followi Examples include the `NodeInfo` and `WorkingStateAccessor`. - **UserTxnScope** - Objects that are created once for platform transaction. Examples include the `Configuration`, `RecordListBuilder` and `TokenContext`. -Dagger provides all the objects that can be constructed in this scope [here](https://github.com/hashgraph/hedera-services/tree/develop/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/txn/modules) -and [UserTxnComponent](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/txn/UserTransactionComponent.java) +Dagger provides all the objects that can be constructed in this scope [here](https://github.com/hashgraph/hedera-services/tree/main/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/txn/modules) +and [UserTxnComponent](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/txn/UserTransactionComponent.java) takes all the inputs that are needed to execute the user transaction. 
- **UserDispatchScope** - Objects that are created once for each user transaction that will be dispatched. Examples include the `SingleTransactionRecordBuilder` for user transaction and `FeeContext`. -Dagger provides all the objects that can be constructed in this scope in [UserDispatchModule](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/user/modules/UserDispatchModule.java) and `UserDispatchComponent`. -and [UserDispatchComponent](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/user/UserDispatchComponent.java) +Dagger provides all the objects that can be constructed in this scope in [UserDispatchModule](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/user/modules/UserDispatchModule.java) and `UserDispatchComponent`. +and [UserDispatchComponent](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/user/UserDispatchComponent.java) takes all the inputs that are needed to create the user dispatch. - **ChildDispatchScope** - Objects that are created once for each child transaction dispatch. Examples include the `ReadableStoreFactory` and `ChildFeeContext`. 
-Dagger provides all the objects that can be constructed in the [ChildDispatchModule](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/child/modules/ChildDispatchModule.java) -and [ChildDispatchComponent](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/child/ChildDispatchComponent.java) +Dagger provides all the objects that can be constructed in the [ChildDispatchModule](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/child/modules/ChildDispatchModule.java) +and [ChildDispatchComponent](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/dispatch/child/ChildDispatchComponent.java) takes all the inputs that are needed to create the child dispatch. #### HandleWorkflow overview: @@ -169,7 +169,7 @@ The overall high level steps are as follows: The `DispatchProcessor.processDispatch` will be called for user and child dispatches. This avoids duplicating any logic between user and child transactions, since both are treated as dispatches. -For the child transactions, when a service calls one of the [dispatchXXXTransaction](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/DispatchHandleContext.java#L459) +For the child transactions, when a service calls one of the [dispatchXXXTransaction](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-app/src/main/java/com/hedera/node/app/workflows/handle/flow/DispatchHandleContext.java#L459) methods in `DispatchHandleContext`, a new child dispatch is created and `DispatchProcessor.processDispatch` is called. 1. 
**Error Validation:** Checks if there is any error by node or user by re-assessing preHandleResult. It validates the following: diff --git a/hedera-node/docs/design/services/smart-contract-service/cancun-fork-support.md b/hedera-node/docs/design/services/smart-contract-service/cancun-fork-support.md index ddb829613f3a..2ce10eb8c9ac 100644 --- a/hedera-node/docs/design/services/smart-contract-service/cancun-fork-support.md +++ b/hedera-node/docs/design/services/smart-contract-service/cancun-fork-support.md @@ -3,14 +3,14 @@ ## Purpose The Ethereum "Cancun" hardfork introduces a number of changes to the EVM that will need to be implemented to maintain EVM -Equivalence. There are four different HIPs covering this feature (tracked in - epic [#11697](https://github.com/hashgraph/hedera-services/issues/11697): +Equivalence. There are four different HIPs covering this feature (tracked in +epic [#11697](https://github.com/hashgraph/hedera-services/issues/11697): -* [HIP-865](https://hips.hedera.com/hip/hip-865): Add EVM Support for transient storage and memory +* [HIP-865](https://hips.hedera.com/hip/hip-865): Add EVM Support for transient storage and memory copy Cancun opcodes (issue [#11699](https://github.com/hashgraph/hedera-services/issues/11699)) * [HIP-866](https://hips.hedera.com/hip/hip-866): Add EVM compatibility for non-supported Cancun blob features (issue [#11700](https://github.com/hashgraph/hedera-services/issues/11700)) -* [HIP-867](https://hips.hedera.com/hip/hip-867): Add Smart Contract Services Support for KZG Point +* [HIP-867](https://hips.hedera.com/hip/hip-867): Add Smart Contract Services Support for KZG Point Evaluation Precompiled Function (issue [#11701](https://github.com/hashgraph/hedera-services/issues/11701)) * [HIP-868](https://hips.hedera.com/hip/hip-868): Support Cancun Self-Destruct Semantics in Smart Contract Services (issue [#11702](https://github.com/hashgraph/hedera-services/issues/11702)) @@ -27,7 +27,6 @@ Generally speaking there 
are four strategies that will be used: ### Example User Story - * As a smart contract developer, I want to use current versions of solidity that may generate opcodes that are only available in the Cancun fork. * These opcodes will enable me to write safer contracts at less gas cost (e.g., transient storage opcodes) @@ -36,7 +35,7 @@ Generally speaking there are four strategies that will be used: specifications change (e.g., `SELFDESTRUCT`) * As a Hedera developer, I want to preserve maximum future design space to adopt, or not adopt, blobs. * As an end user, I want prompt and accurate failures if I attempt to use Blob features in Hedera. - * And as a smart contract develop I want attempts to _use_ internal blob-support features (e.g., + * And as a smart contract developer I want attempts to _use_ internal blob-support features (e.g., opcodes `VERSIONEDHASH` and `BLOBBASEFEE`) to behave in a predictable manner ## Goals @@ -64,12 +63,12 @@ Cancun support in mono-services. ### New Cancun EVM A new EVM version will be created that will be based off of the Cancun EVM - which is from a Besu -release `>24.1.2`. A new enum value, `HederaEvmVersion.VERSION_050` will have the value `v0.50`. +release `>24.1.2`. A new enum value, `HederaEvmVersion.VERSION_050` will have the value `v0.50`. -Setting the `@ConfigProperty` `ContractsConfig.evmVersion` (string value `contracts.evm.version`) +Setting the `@ConfigProperty` `ContractsConfig.evmVersion` (string value `contracts.evm.version`) to `v0.50` (from `v0.46`) will activate the Cancun EVM. -Simply activating the Cancun EVM - with no changes or overrides - will immediately provide the +Simply activating the Cancun EVM - with no changes or overrides - will immediately provide the following features: * `MCOPY` operation @@ -81,7 +80,7 @@ following features: Update `CustomGasCalculator` to inherit from Besu's `CancunGasCalculator`. 
* (This needs to be part of the regular EVM module upgrade - it was last updated for the London -release, wasn't done for Shanghai.) + release, wasn't done for Shanghai.) ### KZG precompile initialization @@ -111,10 +110,9 @@ sufficient. - ### Update Hedera's CustomSelfDestructOperation behavior -The current Hedera override class, `CustomSelfDestructOperation`, will be updated so that it registers, +The current Hedera override class, `CustomSelfDestructOperation`, will be updated so that it registers, with the frame, the executing contract for deletion if either: * pre-Cancun semantics, or @@ -122,7 +120,7 @@ with the frame, the executing contract for deletion if either: * the latter information is available in the frame itself A constructor parameter will choose which semantics to implement (which matches the way BESU does it, - though it isn't _necessary_ to match it). +though it isn't _necessary_ to match it). ## Acceptance Tests @@ -152,4 +150,3 @@ Verify that the new behavior of the `SELFDESTRUCT` operation is correct: * For both of the above, verify that the hbar balance is correctly sent to their beneficiary * But only where allowed by Hedera semantics (e.g., w.r.t. 
beneficiary account existence and type, and required signatures) - diff --git a/hedera-node/docs/design/services/smart-contract-service/contract-accounts-nonces.md b/hedera-node/docs/design/services/smart-contract-service/contract-accounts-nonces.md index 311a0b10e552..dfccfb3656fe 100644 --- a/hedera-node/docs/design/services/smart-contract-service/contract-accounts-nonces.md +++ b/hedera-node/docs/design/services/smart-contract-service/contract-accounts-nonces.md @@ -14,13 +14,14 @@ In order to provide more complete EVM account equivalence support and a better d - Handle nonce updates for EOAs ## Assumptions + - Mirror Node is able to process nonce updates through transaction records ## Architecture The following is a table with general use cases and behavior for Ethereum and Hedera: -| Use case | Behavior in Ethereum | Behavior in Hedera (current) | Behavior in Hedera (desired) | +| Use case | Behavior in Ethereum | Behavior in Hedera (current) | Behavior in Hedera (desired) | |-----------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | EOA transaction (`EthereumTransaction`) | EOA nonce is incremented | EOA nonce is incremented, updates are not externalized to Mirror Node through transaction records, but can be picked up by reading the value from the `EthereumTransaction` body | EOA nonce is incremented, updates are not externalized to Mirror Node through transaction records, but can be picked up by reading the value from the `EthereumTransaction` body | | EOA 
transaction (`ContractCall` or `ContractCreate`) | - | - | - | @@ -28,13 +29,14 @@ The following is a table with general use cases and behavior for Ethereum and He | Contract transaction resulting in `CREATE/CREATE2` (`ContractCall` or `ContractCreate`) | - | initial contract nonce value is 1; nonce is incremented for each contract creation initiated by an account, updates are not externalized to Mirror Node | initial contract nonce value is 1; nonce is incremented for each contract creation initiated by an account, updates are externalized to Mirror Node | ### Contract Nonce Externalization -- We keep a `ContractId -> nonce` tree map inside [HederaWorldState](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/store/contracts/HederaWorldState.java#L79), it is updated on each call of `commit()` (using newly added method `trackContractNonces()`). -- Method `trackContractNonces` in [HederaWorldState](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/store/contracts/HederaWorldState.java#L393) follows the pattern of `trackNewlyCreatedAccounts`. - - Checks if an account is a new smart contract and externalizes its nonce. - - Checks if an existing smart contract's nonce is updated and externalizes it. -- Added a `ContractId -> nonce` tree map inside [TransactionProcessingResult](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/contracts/execution/TransactionProcessingResult.java#L45). 
-- Persists account contract nonces into state in [ContractCreateTransitionLogic](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/txns/contract/ContractCreateTransitionLogic.java#L209) and [ContractCallTransitionLogic](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/txns/contract/ContractCallTransitionLogic.java#L148) using `setContractNonces` from `TransactionProcessingResult`. -- Created new [ContractNonceInfo](https://github.com/hashgraph/hedera-services/blob/develop/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/state/submerkle/ContractNonceInfo.java) submerkle class with two main entities - `contractId` and `nonce` + +- We keep a `ContractId -> nonce` tree map inside [HederaWorldState](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/store/contracts/HederaWorldState.java#L79), it is updated on each call of `commit()` (using newly added method `trackContractNonces()`). +- Method `trackContractNonces` in [HederaWorldState](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/store/contracts/HederaWorldState.java#L393) follows the pattern of `trackNewlyCreatedAccounts`. + - Checks if an account is a new smart contract and externalizes its nonce. + - Checks if an existing smart contract's nonce is updated and externalizes it. +- Added a `ContractId -> nonce` tree map inside [TransactionProcessingResult](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/contracts/execution/TransactionProcessingResult.java#L45). 
+- Persists account contract nonces into state in [ContractCreateTransitionLogic](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/txns/contract/ContractCreateTransitionLogic.java#L209) and [ContractCallTransitionLogic](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/txns/contract/ContractCallTransitionLogic.java#L148) using `setContractNonces` from `TransactionProcessingResult`. +- Created new [ContractNonceInfo](https://github.com/hashgraph/hedera-services/blob/main/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/state/submerkle/ContractNonceInfo.java) submerkle class with two main entities - `contractId` and `nonce` - Added new method `serializableContractNoncesFrom` in [EvmFnResult](https://github.com/hashgraph/hedera-services/blob/96a85f0e08f82582bbf25328d14ca90fc630c5ef/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/state/submerkle/EvmFnResult.java) that builds `List` (submerkle) from `Map` - Added new verison `7` (`RELEASE_0400_VERSION`) and externalized logic for `serialize` and `deserialize` of `contractNonces` in `EvmFnResult` @@ -44,7 +46,7 @@ The following is a table with general use cases and behavior for Ethereum and He - For all created and updated contracts we should store their nonces in state. - We need a way to track a contract account's `nonce` by its `address`. 
- - Added method `setNonce` in [UpdateAccountTracker](https://github.com/hashgraph/hedera-services/blob/96a85f0e08f82582bbf25328d14ca90fc630c5ef/hedera-node/hedera-evm/src/main/java/com/hedera/node/app/service/evm/store/UpdateAccountTracker.java) - - - Added method `setNonce` in [UpdateAccountTrackerImpl](https://github.com/hashgraph/hedera-services/blob/96a85f0e08f82582bbf25328d14ca90fc630c5ef/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/store/UpdateAccountTrackerImpl.java) + - - Added method `setNonce` in [UpdateAccountTrackerImpl](https://github.com/hashgraph/hedera-services/blob/96a85f0e08f82582bbf25328d14ca90fc630c5ef/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/store/UpdateAccountTrackerImpl.java) - - It sets property `ETHEREUM_NONCE` for `address` into `trackingAccounts` - Updated `setNonce` in [UpdateTrackingAccount](https://github.com/hashgraph/hedera-services/blob/96a85f0e08f82582bbf25328d14ca90fc630c5ef/hedera-node/hedera-evm/src/main/java/com/hedera/node/app/service/evm/store/models/UpdateTrackingAccount.java#L142) to use [UpdateAccountTrackerImpl](https://github.com/hashgraph/hedera-services/blob/96a85f0e08f82582bbf25328d14ca90fc630c5ef/hedera-node/hedera-mono-service/src/main/java/com/hedera/node/app/service/mono/store/UpdateAccountTrackerImpl.java#L51)'s implementation - `AbstractLedgerWorldUpdater` -> `createAccount` -> `newMutable.setNonce(nonce)` @@ -87,6 +89,7 @@ When contract is merged into an existing hollow account, we should update implem * Verify that when contract A is merged into hollow account H, the nonce of the resulting account is set to 1. * Verify that when contract A is merged into hollow account H and the init code of A also deploys contract B, the nonce of the resulting account is 2 and the nonce of contract B is set to 1. * Verify that when feature flag is disabled contract nonces won't be externalized. 
+ ### Negative Tests * Verify that when contract A fails to deploy contract B, the nonce of contract A is still incremented by 1. diff --git a/hedera-node/docs/dev/JRS-GettingStarted.md b/hedera-node/docs/dev/JRS-GettingStarted.md index 9a4d8c604994..2918390da908 100644 --- a/hedera-node/docs/dev/JRS-GettingStarted.md +++ b/hedera-node/docs/dev/JRS-GettingStarted.md @@ -1,35 +1,39 @@ # Java Regression Suite(JRS) Developer Testing + # **Table of Contents** - [Description](#description) - [Overview](#overview) - - [File Types](#file-types) - - [Naming Conventions for the JSONs](#naming-conventions) - - [Instructions for kicking off Regression from local machine](#instructions) + - [File Types](#file-types) + - [Naming Conventions for the JSONs](#naming-conventions) + - [Instructions for kicking off Regression from local machine](#instructions) - [Services Nightly Regression](#nightly-regression) - [GCP SetUp](#gcp_setup) # **Description** -This document describes the steps needed to run a JRS test using the infrastructure in + +This document describes the steps needed to run a JRS test using the infrastructure in swirlds-platform-regression. To be able to run using JRS, it is necessary to [set up GCP](https://github.com/hashgraph/hedera-services/blob/docs/dev/GCP-setup.md) on your local machine. # **Overview** + The Java Regression Suite (JRS) runs on a remote machine. It relies on two types of json files, i.e., Regression configuration json and experiment configuration json described below. JRS infrastructure provisions the machines and run experiments based on the configurations set in the JSONs. + ## **File Types** -1. _Regression configuration JSON :_ The file name usually starts with `GCP_` (Eg : `GCP-Personal-Neeha-4N.json`,`GCP-Daily-Services-Comp-Basic-4N-1C.json`). 
Based on the credentials after [GCP setup](#gcp_setup), it holds information of - - `cloud` cloud configuration that includes the private keys, regions in which nodes need to be instantiated etc., - - `experiments` experiments to run. - - `slack` slack details needed to post the results. Use `hedera-regression-test` channel for testing. `hedera-regression`/`hedera-regression-summary` channel needs to be used exclusively for nightly regression results. - - `result` results folder in the local machine to download the logs after test. - - other minor details. - -2. _Experiment configuration JSON :_ It holds specific information on the set up of the experiment and validations to be done to tell if it passed. It majorly includes + +1. _Regression configuration JSON :_ The file name usually starts with `GCP_` (Eg : `GCP-Personal-Neeha-4N.json`,`GCP-Daily-Services-Comp-Basic-4N-1C.json`). Based on the credentials after [GCP setup](#gcp_setup), it holds information of + - `cloud` cloud configuration that includes the private keys, regions in which nodes need to be instantiated etc., + - `experiments` experiments to run. + - `slack` slack details needed to post the results. Use `hedera-regression-test` channel for testing. `hedera-regression`/`hedera-regression-summary` channel needs to be used exclusively for nightly regression results. + - `result` results folder in the local machine to download the logs after test. + - other minor details. +2. _Experiment configuration JSON :_ It holds specific information on the set up of the experiment and validations to be done to tell if it passed. It majorly includes - `duration` duration of the test in seconds. - `settings` platform settings that needs to be overridden. - `runConfig` types of the run configuration based on the test type. 
Eg: `RECONNECT`, `RESTART`, `RECOVER` etc., @@ -43,8 +47,8 @@ In order to avoid duplication of data, both JSONs support inheritance from `pare ``` "parentList": [ "configs/services/default/standard-services-settings.json" - "configs/services/default/standard-services-configs.json", - "configs/services/default/standard-services-validators.json" + "configs/services/default/standard-services-configs.json", + "configs/services/default/standard-services-validators.json" ], ``` @@ -54,44 +58,45 @@ It should be created under `swirlds-platform/regression/configs/services/suites/ **NOTE:** `name` field in any Regression JSON should be of limited length. While creating instances timestamp is added to this name and GCP has limitation on the length of the name constructed for creating instances. Providing very long `name` will cause failure to create instances. - **Example of a Personal JSON** + ``` { "parentList": [ - "configs/services/JRS-Default-Services.json", - "configs/services/GCP-Default-Services-4N.json", - "configs/services/suites/daily/JRS-Daily-Default-Services-Slack.json" + "configs/services/JRS-Default-Services.json", + "configs/services/GCP-Default-Services-4N.json", + "configs/services/suites/daily/JRS-Daily-Default-Services-Slack.json" ], "name": "GCP-Personal-GCPTEST", "result": { - "uri": "results/Personal" + "uri": "results/Personal" }, "cloud": { - "login": "gcptest", - "keyLocation": "./keys/my-key", - "projectName": "hedera-regression", - "regionList": [ - { - "region": "us-east1-b", - "numberOfNodes": 4, - "numberOfTestClientNodes": 1 - } - ] + "login": "gcptest", + "keyLocation": "./keys/my-key", + "projectName": "hedera-regression", + "regionList": [ + { + "region": "us-east1-b", + "numberOfNodes": 4, + "numberOfTestClientNodes": 1 + } + ] }, "experiments": [ - "configs/services/tests/Misc-Basic-14-20m.json" --> experiment json + "configs/services/tests/Misc-Basic-14-20m.json" --> experiment json ], "slack": { - "channel": "hedera-regression-test", - 
"summaryChannel": "hedera-regression-test", - "notifyOn": "ERROR", - "notifyUserIds": [ - "SLACK_USER_ID" - ] + "channel": "hedera-regression-test", + "summaryChannel": "hedera-regression-test", + "notifyOn": "ERROR", + "notifyUserIds": [ + "SLACK_USER_ID" + ] } } ``` + **Example of a basic experiment JSON** ``` @@ -99,54 +104,57 @@ Providing very long `name` will cause failure to create instances. "name": "Misc-Basic-14-20m", "description": "SuiteRunner-Basic-Suites-JRS", "parentList": [ - "configs/services/default/standard-services-configs.json", - "configs/services/default/standard-services-settings.json", - "configs/services/default/standard-services-validators.json" + "configs/services/default/standard-services-configs.json", + "configs/services/default/standard-services-settings.json", + "configs/services/default/standard-services-validators.json" ], "duration": 1200, "hederaServicesConfig": { - "testSuites": [ - "ControlAccountsExemptForUpdates", - "UmbrellaRedux", - "TopicCreateSpecs", - "SubmitMessageSpecs", - "TopicUpdateSpecs", - "HCSTopicFragmentationSuite", - "TopicGetInfoSpecs", - "CryptoCreateSuite", - "CryptoRecordSanityChecks", - "SignedTransactionBytesRecordsSuite", - "SuperusersAreNeverThrottled", - "FileRecordSanityChecks", - "VersionInfoSpec" - ], - "fixedNode": true + "testSuites": [ + "ControlAccountsExemptForUpdates", + "UmbrellaRedux", + "TopicCreateSpecs", + "SubmitMessageSpecs", + "TopicUpdateSpecs", + "HCSTopicFragmentationSuite", + "TopicGetInfoSpecs", + "CryptoCreateSuite", + "CryptoRecordSanityChecks", + "SignedTransactionBytesRecordsSuite", + "SuperusersAreNeverThrottled", + "FileRecordSanityChecks", + "VersionInfoSpec" + ], + "fixedNode": true } } ``` -## **Naming Conventions for the JSONs** -Naming conventions described in [file](https://github.com/swirlds/swirlds-platform-regression/blob/develop/docs/regression-test-naming-standards.md) are required to be followed for both types of configuration JSONs. 
+ +## **Naming Conventions for the JSONs** + +Naming conventions described in [file](https://github.com/swirlds/swirlds-platform-regression/blob/main/docs/regression-test-naming-standards.md) are required to be followed for both types of configuration JSONs. Any new naming conventions need to be added to the file if required, after seeking approval from the code owners in `swirlds-platform-regression` repository. + ## **Instructions for kicking off Regression from local machine** - Open terminal. - Clone `hedera-services` and `swirlds-platform` repositories. -- Checkout the branches needed in both the repositories. +- Checkout the branches needed in both the repositories. - Add the experiment to be run in user's personal JSON `configs/services/suites/personal/GCP-Personal-XXX.json` in experiments section, as the example below. + ``` "experiments": [ "configs/services/tests/reconnect/SmartContractOps-NIReconnect-14-21m.json" ] ``` -- Run the following command to start regression. - `cd swirlds-platform/regression; +- Run the following command to start regression. + `cd swirlds-platform/regression; ./regression_services.sh configs/services/suites/personal/GCP-Personal-XXX.json path_to_hedera-services_repository > cron-personal-test.err 2>&1` - - - `regression_services.sh` will compile both the repositories before provisioning instances. + - `regression_services.sh` will compile both the repositories before provisioning instances. - Add `& disown -h` at the end of the above command if it needs to run in background. If not use screen. @@ -167,10 +175,10 @@ Current Services nightly regression runs the following tests based on the cron t All the above tests are under the following path `swirlds-platform/regression/configs/services/suites` under `daily` or `weekly` with appropriate names. 
-**NOTE** : To validate the regression results follow steps defined in [regression-validation-checklist.md](https://github.com/swirlds/swirlds-platform-regression/blob/develop/docs/regression-validation-checklist.md). +**NOTE** : To validate the regression results follow steps defined in [regression-validation-checklist.md](https://github.com/swirlds/swirlds-platform-regression/blob/main/docs/regression-validation-checklist.md). # **GCP setup to run tests** -Steps to set up GCP are listed in this [document](https://github.com/hashgraph/hedera-services/blob/docs/dev/GCP-setup.md). \ No newline at end of file +Steps to set up GCP are listed in this [document](https://github.com/hashgraph/hedera-services/blob/main/docs/dev/GCP-setup.md). diff --git a/platform-sdk/README.md b/platform-sdk/README.md index 55ae640891fb..52919c105e2a 100644 --- a/platform-sdk/README.md +++ b/platform-sdk/README.md @@ -46,7 +46,7 @@ Portions of this Hedera Hashgraph, LLC Software may utilize the following copyri use of which is hereby acknowledged. The full list of acknowledgements is available at -[https://github.com/hashgraph/hedera-services/raw/develop/platform-sdk/sdk/docs/acknowledgments.html](sdk/docs/acknowledgments.html) +[https://github.com/hashgraph/hedera-services/raw/main/platform-sdk/sdk/docs/acknowledgments.html](sdk/docs/acknowledgments.html) ## License diff --git a/platform-sdk/description.txt b/platform-sdk/description.txt index 90ec8bd24256..c8bb0806c141 100644 --- a/platform-sdk/description.txt +++ b/platform-sdk/description.txt @@ -1,3 +1,3 @@ -Swirlds is a software platform designed to build fully-distributed applications that harness the power of the cloud +Hedera Hashgraph is a software platform designed to build fully-distributed applications that harness the power of the cloud without servers. Now you can develop applications with fairness in decision making, speed, trust and reliability, at a fraction of the cost of traditional server-based platforms.
diff --git a/platform-sdk/docs/core/address-book-management.md b/platform-sdk/docs/core/address-book-management.md index c0f70b980a71..90ea9fe06de5 100644 --- a/platform-sdk/docs/core/address-book-management.md +++ b/platform-sdk/docs/core/address-book-management.md @@ -2,8 +2,8 @@ ## WIP -The address book management pipeline is currently a work in progress. -This document reflects the address book pipeline as we want it to be, not necessarily as it is in `develop` today. +The address book management pipeline is currently a work in progress. +This document reflects the address book pipeline as we want it to be, not necessarily as it is in `main` today. ## Summary @@ -124,7 +124,7 @@ If an `AddressBook` is mutable, it can be updated via the following methods: ## AddressBookStore -An `AddressBookStore` is a collection of `AddressBook` instances from a sequence of recent rounds. +An `AddressBookStore` is a collection of `AddressBook` instances from a sequence of recent rounds. `AddressBookStore` is a merkle node and is stored as a part of the platform's state. @@ -231,8 +231,8 @@ At the end of the round, the platform does basic validation on the mutated addre address book is valid then it is accepted, if it is not valid then it is replaced by a copy of the previous address book. (Note: nothing changes if the address book does not change, it is still ingested in the same way.) -The resulting address book for the round is then inserted into the state's address book store for the next round. -It is also inserted into the address book manager. This may cause an old version of the address book to be +The resulting address book for the round is then inserted into the state's address book store for the next round. +It is also inserted into the address book manager. This may cause an old version of the address book to be removed from both the address book manager and from address book store of the next round. 
When the dual state for the next round is created, a mutable copy of the address book is made so that the application diff --git a/platform-sdk/docs/core/wiring-diagram.svg b/platform-sdk/docs/core/wiring-diagram.svg index 1e314abde54d..098ffc251b92 100644 --- a/platform-sdk/docs/core/wiring-diagram.svg +++ b/platform-sdk/docs/core/wiring-diagram.svg @@ -1 +1 @@ -
Transaction Handling
State Verification
State Signature Collection
State File Management
Preconsensus Event Stream
PCES Replay
Miscellaneous
Event Intake
Event Creation
Consensus
AppNotifier
❔💢💥🚦
Branch Detection
❔🌀
Consensus Engine
❔🚦
🌀
🚽
EventCreationManager
❔❤️🌀🏥🚦
TransactionPool
♻️❔🏥🖋️🚦
♻️
⚰️
PostHashCollector
Mystery Input
❤️
🏥
💨
🚦
PcesWriter
✅❔🌀📀🚽
💾
📀
State Signature Collector
❔🔰
💢
ISS Detector
💀
💥
🖋️
Transaction Handler
💨🔮
gossip
❔🌀🏥📬🚦
🍎
📝
📬
🔮
🔰
consensus events
rounds
rounds
event window
flush request
future hash
self events
get transactions
GossipEvent
GossipEvent
unordered events
health info
check system health
checkSignedStates
evaluate status
heartbeat
PlatformStatusAction
IssNotification
non-deduplicated events
mystery data
checkForBranches
GossipEvent
unsequenced event
GossipEvent
GossipEvent
events to gossip
preconsensus signatures
GossipEvent
events to write
durable event info
non-validated events
handleConsensusRound
hash override
self events
stale events
publishStaleEvent
non-validated events
hashed states
handleStateAndRound
consensus events
hashed states
signState
complete state
states
complete state notification
state written notification
PlatformStatusAction
minimum identifier to store
submit transaction
PlatformStatus
setState
unhashed state and round
registerState
submit transaction
futures
unhashed event
unhashed event
done streaming pces
\ No newline at end of file +
Transaction Handling
State Verification
State Signature Collection
State File Management
Preconsensus Event Stream
PCES Replay
Miscellaneous
Event Intake
Event Creation
Consensus
AppNotifier
❔💢💥🚦
Branch Detection
❔🌀
Consensus Engine
❔🚦
🌀
🚽
EventCreationManager
❔❤️🌀🏥🚦
TransactionPool
♻️❔🏥🖋️🚦
♻️
⚰️
PostHashCollector
Mystery Input
❤️
🏥
💨
🚦
PcesWriter
✅❔🌀📀🚽
💾
📀
State Signature Collector
❔🔰
💢
ISS Detector
💀
💥
🖋️
Transaction Handler
💨🔮
gossip
❔🌀🏥📬🚦
🍎
📝
📬
🔮
🔰
consensus events
rounds
rounds
event window
flush request
future hash
self events
get transactions
GossipEvent
GossipEvent
unordered events
health info
check system health
checkSignedStates
evaluate status
heartbeat
PlatformStatusAction
IssNotification
non-deduplicated events
mystery data
checkForBranches
GossipEvent
unsequenced event
GossipEvent
GossipEvent
events to gossip
preconsensus signatures
GossipEvent
events to write
durable event info
non-validated events
handleConsensusRound
hash override
self events
stale events
publishStaleEvent
non-validated events
hashed states
handleStateAndRound
consensus events
hashed states
signState
complete state
states
complete state notification
state written notification
PlatformStatusAction
minimum identifier to store
submit transaction
PlatformStatus
setState
unhashed state and round
registerState
submit transaction
futures
unhashed event
unhashed event
done streaming pces
\ No newline at end of file diff --git a/platform-sdk/docs/proposals/README.md b/platform-sdk/docs/proposals/README.md index a11874f047ab..f421e5e234a2 100644 --- a/platform-sdk/docs/proposals/README.md +++ b/platform-sdk/docs/proposals/README.md @@ -128,7 +128,7 @@ to `Withdrawn` and closed. ## Delivery of A Proposal -Once an accepted proposal has been completely implemented, tested, the code merged into `develop`, and the feature is +Once an accepted proposal has been completely implemented, tested, the code merged into `main`, and the feature is planned to be enabled for production, the proposal's content should be merged with the documentation of the platform in `platform-sdk/docs` (or other relevant location such as `module-info.java`), as applicable, and removed from `platform-sdk/docs/proposals`. Once the feature is live on mainnet, the status of the proposal PR in the proposal diff --git a/platform-sdk/docs/proposals/metric-labels/README.md b/platform-sdk/docs/proposals/metric-labels/README.md index b61074586088..625140323a68 100644 --- a/platform-sdk/docs/proposals/metric-labels/README.md +++ b/platform-sdk/docs/proposals/metric-labels/README.md @@ -1,9 +1,8 @@ # Metric labels -| Metadata | Entities | - |--------------------|--------------------------------------------| - | Designers | [@hendrikebbers](https://github.com/hendrikebbers), [@mxtartaglia-sl](https://github.com/mxtartaglia-sl) | - +| Metadata | Entities | +|-----------|----------------------------------------------------------------------------------------------------------| +| Designers | [@hendrikebbers](https://github.com/hendrikebbers), [@mxtartaglia-sl](https://github.com/mxtartaglia-sl) | ## Summary @@ -52,17 +51,17 @@ Labels are used by monitoring systems like Grafana to create more dynamic dashbo In addition to having a unique name, a metric can also have labels. Labels allow for differentiation within a metric. 
For example, a metric measuring incoming connections can distinguish between GET and POST requests using labels. -Consider the following example with the metric `api_http_requests_total`, which counts the total number of HTTP requests a server receives. +Consider the following example with the metric `api_http_requests_total`, which counts the total number of HTTP requests a server receives. -To distinguish between different types of requests, such as GET and POST requests, one could define two different metrics and aggregate them at query time. Alternatively, one could define possible labels for a single metric when creating it and assign specific values to these labels based on the nature of the HTTP request at measurement time. +To distinguish between different types of requests, such as GET and POST requests, one could define two different metrics and aggregate them at query time. Alternatively, one could define possible labels for a single metric when creating it and assign specific values to these labels based on the nature of the HTTP request at measurement time. Then, using the Prometheus query language: * Sum up the values of all metrics with that name (as always): -`sum(api_http_requests_total) ` +`sum(api_http_requests_total) ` * Or, get the sums of the values measured with the corresponding label information. -`sum(api_http_requests_total{method="GET"})` -`sum(api_http_requests_total{method="POST"})` + `sum(api_http_requests_total{method="GET"})` + `sum(api_http_requests_total{method="POST"})` See https://prometheus.io/docs/prometheus/latest/querying/basics/#time-series-selectors for more information. @@ -86,27 +85,27 @@ A label is a key-value pair that can be defined as a `record`: ```java record Label(@NonNull String key, @NonNull String value) {} -``` +``` The `Metric` interface will be extended by several methods that allow to define label values to a metric: ```java interface Metric { - + //... 
- + @NonNull Metric withLabel(@NonNull String key, @NonNull String value); - + @NonNull Metric withLabel(@NonNull Label label); - + @NonNull Metric withLabels(@NonNull Label... labels); - + @NonNull Set