From 3200cecd137e1aa495f78cd47c097efadfc510bc Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Thu, 4 Apr 2024 17:42:11 -0300 Subject: [PATCH 01/34] refactor: remove unnecessary Rust code Only the authority-claimer service will remain in the Node 2.0. --- .github/workflows/build.yml | 53 +- .github/workflows/rust-code-quality.yml | 7 +- .gitmodules | 3 - CHANGELOG.md | 7 + Makefile | 16 +- build/Dockerfile | 13 +- build/compose-host.yaml | 12 - build/compose-node.yaml | 1 - .../authority-claimer}/.gitignore | 0 cmd/authority-claimer/.rustfmt.toml | 2 + .../authority-claimer}/Cargo.lock | 1971 ++--------------- cmd/authority-claimer/Cargo.toml | 54 + {offchain => cmd}/authority-claimer/README.md | 0 .../authority-claimer}/build.rs | 2 - .../authority-claimer/src/checker.rs | 7 +- .../authority-claimer/src/claimer.rs | 6 +- .../authority-claimer/src/config/cli.rs | 26 +- .../authority-claimer/src/config/contracts.rs | 18 +- .../authority-claimer/src/config/error.rs | 6 +- .../authority-claimer/src/config/mod.rs | 10 +- .../authority-claimer/src/contracts.rs | 2 - .../authority-claimer/src/http_server.rs | 47 +- .../authority-claimer/src/lib.rs | 28 +- .../authority-claimer/src/listener.rs | 21 +- .../authority-claimer/src/log.rs | 13 +- .../authority-claimer/src/main.rs | 2 +- .../authority-claimer/src/metrics.rs | 4 +- .../authority-claimer/src/redacted.rs | 0 .../src/rollups_events/broker.rs | 4 +- .../src/rollups_events}/common.rs | 8 - .../src/rollups_events/mod.rs | 15 + .../src/rollups_events}/rollups_claims.rs | 2 +- .../src/rollups_events}/rollups_stream.rs | 60 +- .../authority-claimer/src/sender.rs | 4 +- .../src/signer/aws_credentials.rs | 0 .../src/signer/aws_signer.rs | 0 .../authority-claimer/src/signer/mod.rs | 0 .../authority-claimer/src/signer/signer.rs | 5 +- .../authority-claimer/src/test_fixtures.rs | 109 +- .../src/types}/blockchain_config.rs | 23 +- .../authority-claimer/src/types}/error.rs | 8 +- .../authority-claimer/src/types/mod.rs | 5 +- .../authority-claimer/src/types}/utils.rs | 13 - {offchain => cmd}/rust-toolchain.toml | 0 docs/config.md | 10 - internal/node/config/config.go | 6 +- internal/node/config/generate/Config.toml | 9 - internal/node/config/generated.go | 12 - internal/node/handlers.go | 28 - internal/node/services.go | 213 +- internal/services/server-manager.go | 140 -- offchain/.rustfmt.toml | 2 - offchain/Cargo.toml | 96 - offchain/advance-runner/Cargo.toml | 35 - offchain/advance-runner/README.md | 4 - offchain/advance-runner/src/broker.rs | 303 --- offchain/advance-runner/src/config.rs | 85 - offchain/advance-runner/src/error.rs | 26 - offchain/advance-runner/src/lib.rs | 66 - offchain/advance-runner/src/main.rs | 15 - offchain/advance-runner/src/runner.rs | 146 -- .../src/server_manager/claim.rs | 45 - .../src/server_manager/config.rs | 149 -- .../src/server_manager/conversions.rs | 135 -- .../src/server_manager/error.rs | 57 - .../src/server_manager/facade.rs | 371 ---- .../advance-runner/src/server_manager/mod.rs | 12 - offchain/advance-runner/tests/fixtures/mod.rs | 136 -- .../advance-runner/tests/host_integration.rs | 297 --- .../tests/server_integration.rs | 296 --- offchain/authority-claimer/Cargo.toml | 42 - offchain/clippy.toml | 2 - offchain/contracts/Cargo.toml | 16 - offchain/contracts/README.md | 3 - offchain/data/Cargo.toml | 26 - offchain/data/README.md | 71 - offchain/data/build.rs | 6 - offchain/data/diesel.toml | 5 - .../down.sql | 6 - .../up.sql | 36 - .../20230110182039_rollups/down.sql | 10 - 
.../migrations/20230110182039_rollups/up.sql | 60 - .../20230921143147_completion_status/down.sql | 5 - .../20230921143147_completion_status/up.sql | 14 - offchain/data/src/config.rs | 76 - offchain/data/src/error.rs | 39 - offchain/data/src/lib.rs | 20 - offchain/data/src/migrations.rs | 35 - offchain/data/src/pagination.rs | 628 ------ offchain/data/src/repository.rs | 318 --- offchain/data/src/schema.rs | 79 - offchain/data/src/types.rs | 173 -- offchain/data/tests/migrations.rs | 53 - offchain/data/tests/repository.rs | 726 ------ offchain/data/util/populate.sql | 40 - offchain/dispatcher/Cargo.toml | 37 - offchain/dispatcher/README.md | 4 - offchain/dispatcher/src/config.rs | 98 - offchain/dispatcher/src/dispatcher.rs | 168 -- offchain/dispatcher/src/drivers/context.rs | 689 ------ offchain/dispatcher/src/drivers/machine.rs | 421 ---- offchain/dispatcher/src/drivers/mock.rs | 204 -- offchain/dispatcher/src/drivers/mod.rs | 10 - offchain/dispatcher/src/error.rs | 43 - offchain/dispatcher/src/lib.rs | 33 - offchain/dispatcher/src/machine/mod.rs | 33 - .../dispatcher/src/machine/rollups_broker.rs | 513 ----- offchain/dispatcher/src/main.rs | 15 - offchain/dispatcher/src/metrics.rs | 40 - offchain/dispatcher/src/setup.rs | 106 - offchain/graphql-server/Cargo.toml | 36 - offchain/graphql-server/README.md | 20 - offchain/graphql-server/src/config.rs | 48 - offchain/graphql-server/src/error.rs | 16 - offchain/graphql-server/src/http.rs | 95 - offchain/graphql-server/src/lib.rs | 35 - offchain/graphql-server/src/main.rs | 17 - .../src/schema/generate_schema.rs | 30 - offchain/graphql-server/src/schema/mod.rs | 16 - .../graphql-server/src/schema/resolvers.rs | 742 ------- offchain/graphql-server/src/schema/scalar.rs | 170 -- offchain/graphql-server/tests/integration.rs | 595 ----- .../tests/queries/error_missing_argument.json | 3 - .../tests/queries/error_not_found.json | 3 - .../tests/queries/error_unknown_field.json | 3 - .../graphql-server/tests/queries/input.json | 3 - .../tests/queries/input_with_notice.json | 3 - .../tests/queries/input_with_notices.json | 3 - .../tests/queries/input_with_report.json | 3 - .../tests/queries/input_with_reports.json | 3 - .../tests/queries/input_with_voucher.json | 3 - .../tests/queries/input_with_vouchers.json | 3 - .../graphql-server/tests/queries/inputs.json | 3 - .../tests/queries/next_page.json | 3 - .../graphql-server/tests/queries/notice.json | 3 - .../tests/queries/notice_with_input.json | 3 - .../tests/queries/notice_with_proof.json | 3 - .../graphql-server/tests/queries/notices.json | 3 - .../tests/queries/previous_page.json | 3 - .../tests/queries/proof_from_notice.json | 3 - .../tests/queries/proof_from_voucher.json | 3 - .../graphql-server/tests/queries/report.json | 3 - .../tests/queries/report_with_input.json | 3 - .../graphql-server/tests/queries/reports.json | 3 - .../tests/queries/variables.json | 6 - .../graphql-server/tests/queries/voucher.json | 3 - .../tests/queries/voucher_with_input.json | 3 - .../tests/queries/voucher_with_proof.json | 3 - .../tests/queries/vouchers.json | 3 - .../responses/error_missing_argument.json | 1 - .../tests/responses/error_not_found.json | 1 - .../tests/responses/error_unknown_field.json | 1 - .../graphql-server/tests/responses/input.json | 1 - .../tests/responses/input_with_notice.json | 1 - .../tests/responses/input_with_notices.json | 1 - .../tests/responses/input_with_report.json | 1 - .../tests/responses/input_with_reports.json | 1 - .../tests/responses/input_with_voucher.json | 1 - 
.../tests/responses/input_with_vouchers.json | 1 - .../tests/responses/inputs.json | 1 - .../tests/responses/next_page.json | 1 - .../tests/responses/notice.json | 1 - .../tests/responses/notice_with_input.json | 1 - .../tests/responses/notice_with_proof.json | 1 - .../tests/responses/notices.json | 1 - .../tests/responses/previous_page.json | 1 - .../tests/responses/proof_from_notice.json | 1 - .../tests/responses/proof_from_voucher.json | 1 - .../tests/responses/report.json | 1 - .../tests/responses/report_with_input.json | 1 - .../tests/responses/reports.json | 1 - .../tests/responses/variables.json | 1 - .../tests/responses/voucher.json | 1 - .../tests/responses/voucher_with_input.json | 1 - .../tests/responses/voucher_with_proof.json | 1 - .../tests/responses/vouchers.json | 1 - offchain/grpc-interfaces/Cargo.toml | 15 - offchain/grpc-interfaces/build.rs | 18 - offchain/grpc-interfaces/grpc-interfaces | 1 - offchain/grpc-interfaces/src/lib.rs | 14 - offchain/host-runner/Cargo.toml | 38 - offchain/host-runner/README.md | 13 - offchain/host-runner/src/config.rs | 77 - offchain/host-runner/src/controller.rs | 941 -------- offchain/host-runner/src/conversions.rs | 88 - offchain/host-runner/src/driver.rs | 74 - offchain/host-runner/src/grpc/mod.rs | 47 - .../host-runner/src/grpc/server_manager.rs | 1036 --------- offchain/host-runner/src/hash.rs | 72 - offchain/host-runner/src/http/errors.rs | 40 - offchain/host-runner/src/http/mod.rs | 17 - offchain/host-runner/src/http/model.rs | 204 -- .../host-runner/src/http/rollup_server.rs | 127 -- offchain/host-runner/src/main.rs | 84 - .../host-runner/src/merkle_tree/complete.rs | 476 ---- offchain/host-runner/src/merkle_tree/mod.rs | 39 - .../host-runner/src/merkle_tree/pristine.rs | 127 -- offchain/host-runner/src/merkle_tree/proof.rs | 69 - offchain/host-runner/src/model.rs | 209 -- offchain/host-runner/src/proofs.rs | 47 - offchain/host-runner/tests/common/config.rs | 28 - .../host-runner/tests/common/grpc_client.rs | 57 - .../host-runner/tests/common/http_client.rs | 118 - offchain/host-runner/tests/common/manager.rs | 57 - offchain/host-runner/tests/common/mod.rs | 71 - offchain/host-runner/tests/grpc.rs | 17 - .../tests/grpc_tests/advance_state.rs | 58 - .../tests/grpc_tests/delete_epoch.rs | 116 - .../tests/grpc_tests/end_session.rs | 50 - .../tests/grpc_tests/finish_epoch.rs | 360 --- .../tests/grpc_tests/get_epoch_status.rs | 118 - .../tests/grpc_tests/get_session_status.rs | 88 - .../tests/grpc_tests/get_status.rs | 44 - .../tests/grpc_tests/get_version.rs | 29 - .../tests/grpc_tests/inspect_state.rs | 169 -- .../tests/grpc_tests/start_session.rs | 60 - offchain/host-runner/tests/http.rs | 12 - .../host-runner/tests/http_tests/exception.rs | 98 - .../host-runner/tests/http_tests/finish.rs | 179 -- .../host-runner/tests/http_tests/notice.rs | 66 - .../host-runner/tests/http_tests/report.rs | 92 - .../host-runner/tests/http_tests/voucher.rs | 73 - offchain/http-health-check/Cargo.toml | 11 - offchain/http-health-check/README.md | 3 - offchain/http-health-check/src/lib.rs | 31 - offchain/http-server/Cargo.toml | 13 - offchain/http-server/src/config.rs | 45 - offchain/indexer/Cargo.toml | 32 - offchain/indexer/README.md | 4 - offchain/indexer/src/config.rs | 56 - offchain/indexer/src/conversions.rs | 126 -- offchain/indexer/src/error.rs | 27 - offchain/indexer/src/indexer.rs | 130 -- offchain/indexer/src/lib.rs | 26 - offchain/indexer/src/main.rs | 17 - offchain/indexer/tests/integration.rs | 543 ----- offchain/inspect-server/Cargo.toml | 
32 - offchain/inspect-server/README.md | 21 - offchain/inspect-server/src/config.rs | 132 -- offchain/inspect-server/src/error.rs | 22 - offchain/inspect-server/src/inspect.rs | 115 - offchain/inspect-server/src/lib.rs | 29 - offchain/inspect-server/src/main.rs | 17 - offchain/inspect-server/src/server.rs | 143 -- offchain/inspect-server/tests/common/mod.rs | 311 --- offchain/inspect-server/tests/payload.rs | 135 -- offchain/inspect-server/tests/queue.rs | 242 -- offchain/inspect-server/tests/response.rs | 176 -- offchain/log/Cargo.toml | 13 - offchain/log/build.rs | 7 - offchain/redacted/Cargo.toml | 8 - offchain/rollups-events/Cargo.toml | 35 - offchain/rollups-events/README.md | 8 - offchain/rollups-events/src/broker/indexer.rs | 106 - offchain/rollups-events/src/lib.rs | 26 - offchain/rollups-events/src/rollups_inputs.rs | 69 - .../rollups-events/src/rollups_outputs.rs | 88 - offchain/rollups-events/tests/indexer.rs | 224 -- offchain/rollups-events/tests/integration.rs | 288 --- offchain/rollups-http-client/Cargo.toml | 11 - offchain/rollups-http-client/README.md | 3 - offchain/rollups-http-client/src/client.rs | 212 -- offchain/rollups-http-client/src/rollup.rs | 75 - offchain/state-server/Cargo.toml | 25 - offchain/state-server/README.md | 5 - offchain/state-server/src/config.rs | 43 - offchain/state-server/src/error.rs | 28 - offchain/state-server/src/lib.rs | 116 - offchain/state-server/src/main.rs | 18 - offchain/test-fixtures/Cargo.toml | 21 - offchain/test-fixtures/README.md | 3 - .../docker/server_manager_nonroot.Dockerfile | 19 - offchain/test-fixtures/src/data.rs | 64 - offchain/test-fixtures/src/docker_cli.rs | 47 - offchain/test-fixtures/src/echo_dapp.rs | 127 -- .../test-fixtures/src/host_server_manager.rs | 200 -- offchain/test-fixtures/src/lib.rs | 19 - .../test-fixtures/src/machine_snapshots.rs | 36 - offchain/test-fixtures/src/repository.rs | 87 - offchain/test-fixtures/src/server_manager.rs | 214 -- offchain/types/Cargo.toml | 22 - offchain/types/README.md | 3 - offchain/types/src/foldables.rs | 240 -- offchain/types/src/lib.rs | 13 - offchain/types/src/user_data.rs | 25 - setup_env.sh | 3 +- 286 files changed, 389 insertions(+), 22549 deletions(-) delete mode 100644 build/compose-host.yaml rename {offchain => cmd/authority-claimer}/.gitignore (100%) create mode 100644 cmd/authority-claimer/.rustfmt.toml rename {offchain => cmd/authority-claimer}/Cargo.lock (75%) create mode 100644 cmd/authority-claimer/Cargo.toml rename {offchain => cmd}/authority-claimer/README.md (100%) rename {offchain/contracts => cmd/authority-claimer}/build.rs (96%) rename {offchain => cmd}/authority-claimer/src/checker.rs (96%) rename {offchain => cmd}/authority-claimer/src/claimer.rs (95%) rename {offchain => cmd}/authority-claimer/src/config/cli.rs (97%) rename {offchain => cmd}/authority-claimer/src/config/contracts.rs (88%) rename {offchain => cmd}/authority-claimer/src/config/error.rs (86%) rename {offchain => cmd}/authority-claimer/src/config/mod.rs (88%) rename offchain/contracts/src/lib.rs => cmd/authority-claimer/src/contracts.rs (91%) rename offchain/http-server/src/lib.rs => cmd/authority-claimer/src/http_server.rs (52%) rename {offchain => cmd}/authority-claimer/src/lib.rs (89%) rename {offchain => cmd}/authority-claimer/src/listener.rs (96%) rename offchain/log/src/lib.rs => cmd/authority-claimer/src/log.rs (75%) rename {offchain => cmd}/authority-claimer/src/main.rs (93%) rename {offchain => cmd}/authority-claimer/src/metrics.rs (89%) rename offchain/redacted/src/lib.rs => 
cmd/authority-claimer/src/redacted.rs (100%) rename offchain/rollups-events/src/broker/mod.rs => cmd/authority-claimer/src/rollups_events/broker.rs (99%) rename {offchain/rollups-events/src => cmd/authority-claimer/src/rollups_events}/common.rs (97%) create mode 100644 cmd/authority-claimer/src/rollups_events/mod.rs rename {offchain/rollups-events/src => cmd/authority-claimer/src/rollups_events}/rollups_claims.rs (95%) rename {offchain/rollups-events/src => cmd/authority-claimer/src/rollups_events}/rollups_stream.rs (55%) rename {offchain => cmd}/authority-claimer/src/sender.rs (98%) rename {offchain => cmd}/authority-claimer/src/signer/aws_credentials.rs (100%) rename {offchain => cmd}/authority-claimer/src/signer/aws_signer.rs (100%) rename {offchain => cmd}/authority-claimer/src/signer/mod.rs (100%) rename {offchain => cmd}/authority-claimer/src/signer/signer.rs (99%) rename offchain/test-fixtures/src/broker.rs => cmd/authority-claimer/src/test_fixtures.rs (55%) rename {offchain/types/src => cmd/authority-claimer/src/types}/blockchain_config.rs (92%) rename {offchain/types/src => cmd/authority-claimer/src/types}/error.rs (86%) rename offchain/rollups-http-client/src/lib.rs => cmd/authority-claimer/src/types/mod.rs (65%) rename {offchain/types/src => cmd/authority-claimer/src/types}/utils.rs (83%) rename {offchain => cmd}/rust-toolchain.toml (100%) delete mode 100644 internal/services/server-manager.go delete mode 100644 offchain/.rustfmt.toml delete mode 100644 offchain/Cargo.toml delete mode 100644 offchain/advance-runner/Cargo.toml delete mode 100644 offchain/advance-runner/README.md delete mode 100644 offchain/advance-runner/src/broker.rs delete mode 100644 offchain/advance-runner/src/config.rs delete mode 100644 offchain/advance-runner/src/error.rs delete mode 100644 offchain/advance-runner/src/lib.rs delete mode 100644 offchain/advance-runner/src/main.rs delete mode 100644 offchain/advance-runner/src/runner.rs delete mode 100644 offchain/advance-runner/src/server_manager/claim.rs delete mode 100644 offchain/advance-runner/src/server_manager/config.rs delete mode 100644 offchain/advance-runner/src/server_manager/conversions.rs delete mode 100644 offchain/advance-runner/src/server_manager/error.rs delete mode 100644 offchain/advance-runner/src/server_manager/facade.rs delete mode 100644 offchain/advance-runner/src/server_manager/mod.rs delete mode 100644 offchain/advance-runner/tests/fixtures/mod.rs delete mode 100644 offchain/advance-runner/tests/host_integration.rs delete mode 100644 offchain/advance-runner/tests/server_integration.rs delete mode 100644 offchain/authority-claimer/Cargo.toml delete mode 100644 offchain/clippy.toml delete mode 100644 offchain/contracts/Cargo.toml delete mode 100644 offchain/contracts/README.md delete mode 100644 offchain/data/Cargo.toml delete mode 100644 offchain/data/README.md delete mode 100644 offchain/data/build.rs delete mode 100644 offchain/data/diesel.toml delete mode 100644 offchain/data/migrations/00000000000000_diesel_initial_setup/down.sql delete mode 100644 offchain/data/migrations/00000000000000_diesel_initial_setup/up.sql delete mode 100644 offchain/data/migrations/20230110182039_rollups/down.sql delete mode 100644 offchain/data/migrations/20230110182039_rollups/up.sql delete mode 100644 offchain/data/migrations/20230921143147_completion_status/down.sql delete mode 100644 offchain/data/migrations/20230921143147_completion_status/up.sql delete mode 100644 offchain/data/src/config.rs delete mode 100644 offchain/data/src/error.rs delete 
mode 100644 offchain/data/src/lib.rs delete mode 100644 offchain/data/src/migrations.rs delete mode 100644 offchain/data/src/pagination.rs delete mode 100644 offchain/data/src/repository.rs delete mode 100644 offchain/data/src/schema.rs delete mode 100644 offchain/data/src/types.rs delete mode 100644 offchain/data/tests/migrations.rs delete mode 100644 offchain/data/tests/repository.rs delete mode 100644 offchain/data/util/populate.sql delete mode 100644 offchain/dispatcher/Cargo.toml delete mode 100644 offchain/dispatcher/README.md delete mode 100644 offchain/dispatcher/src/config.rs delete mode 100644 offchain/dispatcher/src/dispatcher.rs delete mode 100644 offchain/dispatcher/src/drivers/context.rs delete mode 100644 offchain/dispatcher/src/drivers/machine.rs delete mode 100644 offchain/dispatcher/src/drivers/mock.rs delete mode 100644 offchain/dispatcher/src/drivers/mod.rs delete mode 100644 offchain/dispatcher/src/error.rs delete mode 100644 offchain/dispatcher/src/lib.rs delete mode 100644 offchain/dispatcher/src/machine/mod.rs delete mode 100644 offchain/dispatcher/src/machine/rollups_broker.rs delete mode 100644 offchain/dispatcher/src/main.rs delete mode 100644 offchain/dispatcher/src/metrics.rs delete mode 100644 offchain/dispatcher/src/setup.rs delete mode 100644 offchain/graphql-server/Cargo.toml delete mode 100644 offchain/graphql-server/README.md delete mode 100644 offchain/graphql-server/src/config.rs delete mode 100644 offchain/graphql-server/src/error.rs delete mode 100644 offchain/graphql-server/src/http.rs delete mode 100644 offchain/graphql-server/src/lib.rs delete mode 100644 offchain/graphql-server/src/main.rs delete mode 100644 offchain/graphql-server/src/schema/generate_schema.rs delete mode 100644 offchain/graphql-server/src/schema/mod.rs delete mode 100644 offchain/graphql-server/src/schema/resolvers.rs delete mode 100644 offchain/graphql-server/src/schema/scalar.rs delete mode 100644 offchain/graphql-server/tests/integration.rs delete mode 100644 offchain/graphql-server/tests/queries/error_missing_argument.json delete mode 100644 offchain/graphql-server/tests/queries/error_not_found.json delete mode 100644 offchain/graphql-server/tests/queries/error_unknown_field.json delete mode 100644 offchain/graphql-server/tests/queries/input.json delete mode 100644 offchain/graphql-server/tests/queries/input_with_notice.json delete mode 100644 offchain/graphql-server/tests/queries/input_with_notices.json delete mode 100644 offchain/graphql-server/tests/queries/input_with_report.json delete mode 100644 offchain/graphql-server/tests/queries/input_with_reports.json delete mode 100644 offchain/graphql-server/tests/queries/input_with_voucher.json delete mode 100644 offchain/graphql-server/tests/queries/input_with_vouchers.json delete mode 100644 offchain/graphql-server/tests/queries/inputs.json delete mode 100644 offchain/graphql-server/tests/queries/next_page.json delete mode 100644 offchain/graphql-server/tests/queries/notice.json delete mode 100644 offchain/graphql-server/tests/queries/notice_with_input.json delete mode 100644 offchain/graphql-server/tests/queries/notice_with_proof.json delete mode 100644 offchain/graphql-server/tests/queries/notices.json delete mode 100644 offchain/graphql-server/tests/queries/previous_page.json delete mode 100644 offchain/graphql-server/tests/queries/proof_from_notice.json delete mode 100644 offchain/graphql-server/tests/queries/proof_from_voucher.json delete mode 100644 offchain/graphql-server/tests/queries/report.json delete mode 100644 
offchain/graphql-server/tests/queries/report_with_input.json delete mode 100644 offchain/graphql-server/tests/queries/reports.json delete mode 100644 offchain/graphql-server/tests/queries/variables.json delete mode 100644 offchain/graphql-server/tests/queries/voucher.json delete mode 100644 offchain/graphql-server/tests/queries/voucher_with_input.json delete mode 100644 offchain/graphql-server/tests/queries/voucher_with_proof.json delete mode 100644 offchain/graphql-server/tests/queries/vouchers.json delete mode 100644 offchain/graphql-server/tests/responses/error_missing_argument.json delete mode 100644 offchain/graphql-server/tests/responses/error_not_found.json delete mode 100644 offchain/graphql-server/tests/responses/error_unknown_field.json delete mode 100644 offchain/graphql-server/tests/responses/input.json delete mode 100644 offchain/graphql-server/tests/responses/input_with_notice.json delete mode 100644 offchain/graphql-server/tests/responses/input_with_notices.json delete mode 100644 offchain/graphql-server/tests/responses/input_with_report.json delete mode 100644 offchain/graphql-server/tests/responses/input_with_reports.json delete mode 100644 offchain/graphql-server/tests/responses/input_with_voucher.json delete mode 100644 offchain/graphql-server/tests/responses/input_with_vouchers.json delete mode 100644 offchain/graphql-server/tests/responses/inputs.json delete mode 100644 offchain/graphql-server/tests/responses/next_page.json delete mode 100644 offchain/graphql-server/tests/responses/notice.json delete mode 100644 offchain/graphql-server/tests/responses/notice_with_input.json delete mode 100644 offchain/graphql-server/tests/responses/notice_with_proof.json delete mode 100644 offchain/graphql-server/tests/responses/notices.json delete mode 100644 offchain/graphql-server/tests/responses/previous_page.json delete mode 100644 offchain/graphql-server/tests/responses/proof_from_notice.json delete mode 100644 offchain/graphql-server/tests/responses/proof_from_voucher.json delete mode 100644 offchain/graphql-server/tests/responses/report.json delete mode 100644 offchain/graphql-server/tests/responses/report_with_input.json delete mode 100644 offchain/graphql-server/tests/responses/reports.json delete mode 100644 offchain/graphql-server/tests/responses/variables.json delete mode 100644 offchain/graphql-server/tests/responses/voucher.json delete mode 100644 offchain/graphql-server/tests/responses/voucher_with_input.json delete mode 100644 offchain/graphql-server/tests/responses/voucher_with_proof.json delete mode 100644 offchain/graphql-server/tests/responses/vouchers.json delete mode 100644 offchain/grpc-interfaces/Cargo.toml delete mode 100644 offchain/grpc-interfaces/build.rs delete mode 160000 offchain/grpc-interfaces/grpc-interfaces delete mode 100644 offchain/grpc-interfaces/src/lib.rs delete mode 100644 offchain/host-runner/Cargo.toml delete mode 100644 offchain/host-runner/README.md delete mode 100644 offchain/host-runner/src/config.rs delete mode 100644 offchain/host-runner/src/controller.rs delete mode 100644 offchain/host-runner/src/conversions.rs delete mode 100644 offchain/host-runner/src/driver.rs delete mode 100644 offchain/host-runner/src/grpc/mod.rs delete mode 100644 offchain/host-runner/src/grpc/server_manager.rs delete mode 100644 offchain/host-runner/src/hash.rs delete mode 100644 offchain/host-runner/src/http/errors.rs delete mode 100644 offchain/host-runner/src/http/mod.rs delete mode 100644 offchain/host-runner/src/http/model.rs delete mode 100644 
offchain/host-runner/src/http/rollup_server.rs delete mode 100644 offchain/host-runner/src/main.rs delete mode 100644 offchain/host-runner/src/merkle_tree/complete.rs delete mode 100644 offchain/host-runner/src/merkle_tree/mod.rs delete mode 100644 offchain/host-runner/src/merkle_tree/pristine.rs delete mode 100644 offchain/host-runner/src/merkle_tree/proof.rs delete mode 100644 offchain/host-runner/src/model.rs delete mode 100644 offchain/host-runner/src/proofs.rs delete mode 100644 offchain/host-runner/tests/common/config.rs delete mode 100644 offchain/host-runner/tests/common/grpc_client.rs delete mode 100644 offchain/host-runner/tests/common/http_client.rs delete mode 100644 offchain/host-runner/tests/common/manager.rs delete mode 100644 offchain/host-runner/tests/common/mod.rs delete mode 100644 offchain/host-runner/tests/grpc.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/advance_state.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/delete_epoch.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/end_session.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/finish_epoch.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/get_epoch_status.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/get_session_status.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/get_status.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/get_version.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/inspect_state.rs delete mode 100644 offchain/host-runner/tests/grpc_tests/start_session.rs delete mode 100644 offchain/host-runner/tests/http.rs delete mode 100644 offchain/host-runner/tests/http_tests/exception.rs delete mode 100644 offchain/host-runner/tests/http_tests/finish.rs delete mode 100644 offchain/host-runner/tests/http_tests/notice.rs delete mode 100644 offchain/host-runner/tests/http_tests/report.rs delete mode 100644 offchain/host-runner/tests/http_tests/voucher.rs delete mode 100644 offchain/http-health-check/Cargo.toml delete mode 100644 offchain/http-health-check/README.md delete mode 100644 offchain/http-health-check/src/lib.rs delete mode 100644 offchain/http-server/Cargo.toml delete mode 100644 offchain/http-server/src/config.rs delete mode 100644 offchain/indexer/Cargo.toml delete mode 100644 offchain/indexer/README.md delete mode 100644 offchain/indexer/src/config.rs delete mode 100644 offchain/indexer/src/conversions.rs delete mode 100644 offchain/indexer/src/error.rs delete mode 100644 offchain/indexer/src/indexer.rs delete mode 100644 offchain/indexer/src/lib.rs delete mode 100644 offchain/indexer/src/main.rs delete mode 100644 offchain/indexer/tests/integration.rs delete mode 100644 offchain/inspect-server/Cargo.toml delete mode 100644 offchain/inspect-server/README.md delete mode 100644 offchain/inspect-server/src/config.rs delete mode 100644 offchain/inspect-server/src/error.rs delete mode 100644 offchain/inspect-server/src/inspect.rs delete mode 100644 offchain/inspect-server/src/lib.rs delete mode 100644 offchain/inspect-server/src/main.rs delete mode 100644 offchain/inspect-server/src/server.rs delete mode 100644 offchain/inspect-server/tests/common/mod.rs delete mode 100644 offchain/inspect-server/tests/payload.rs delete mode 100644 offchain/inspect-server/tests/queue.rs delete mode 100644 offchain/inspect-server/tests/response.rs delete mode 100644 offchain/log/Cargo.toml delete mode 100644 offchain/log/build.rs delete mode 100644 offchain/redacted/Cargo.toml delete mode 100644 
offchain/rollups-events/Cargo.toml delete mode 100644 offchain/rollups-events/README.md delete mode 100644 offchain/rollups-events/src/broker/indexer.rs delete mode 100644 offchain/rollups-events/src/lib.rs delete mode 100644 offchain/rollups-events/src/rollups_inputs.rs delete mode 100644 offchain/rollups-events/src/rollups_outputs.rs delete mode 100644 offchain/rollups-events/tests/indexer.rs delete mode 100644 offchain/rollups-events/tests/integration.rs delete mode 100644 offchain/rollups-http-client/Cargo.toml delete mode 100644 offchain/rollups-http-client/README.md delete mode 100644 offchain/rollups-http-client/src/client.rs delete mode 100644 offchain/rollups-http-client/src/rollup.rs delete mode 100644 offchain/state-server/Cargo.toml delete mode 100644 offchain/state-server/README.md delete mode 100644 offchain/state-server/src/config.rs delete mode 100644 offchain/state-server/src/error.rs delete mode 100644 offchain/state-server/src/lib.rs delete mode 100644 offchain/state-server/src/main.rs delete mode 100644 offchain/test-fixtures/Cargo.toml delete mode 100644 offchain/test-fixtures/README.md delete mode 100644 offchain/test-fixtures/docker/server_manager_nonroot.Dockerfile delete mode 100644 offchain/test-fixtures/src/data.rs delete mode 100644 offchain/test-fixtures/src/docker_cli.rs delete mode 100644 offchain/test-fixtures/src/echo_dapp.rs delete mode 100644 offchain/test-fixtures/src/host_server_manager.rs delete mode 100644 offchain/test-fixtures/src/lib.rs delete mode 100644 offchain/test-fixtures/src/machine_snapshots.rs delete mode 100644 offchain/test-fixtures/src/repository.rs delete mode 100644 offchain/test-fixtures/src/server_manager.rs delete mode 100644 offchain/types/Cargo.toml delete mode 100644 offchain/types/README.md delete mode 100644 offchain/types/src/foldables.rs delete mode 100644 offchain/types/src/lib.rs delete mode 100644 offchain/types/src/user_data.rs diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0efde78c0..c9cc4d3cb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,13 +19,13 @@ permissions: contents: write jobs: - test: + test-rust: runs-on: ubuntu-22.04 env: RUSTFLAGS: -D warnings -C debuginfo=0 defaults: run: - working-directory: offchain + working-directory: cmd/authority-claimer steps: - uses: actions/checkout@v4 with: @@ -40,15 +40,11 @@ jobs: ./docker-bake.override.hcl ./docker-bake.platforms.hcl targets: | - rollups-node-devnet rollups-node-snapshot project: ${{ vars.DEPOT_PROJECT }} workdir: build load: true - - name: 📦 Install protoc - run: sudo apt update && sudo apt install -y protobuf-compiler libprotobuf-dev - - uses: actions/cache@v4 with: path: | @@ -56,7 +52,7 @@ jobs: ~/.cargo/registry/index/ ~/.cargo/registry/cache/ ~/.cargo/git/db/ - offchain/target/ + ./cmd/authority-claimer/target/ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: ${{ runner.os }}-cargo- @@ -83,15 +79,28 @@ jobs: - name: Run tests run: cargo test - - name: Generate GraphQL schema - run: ./target/debug/generate-schema + test-go: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive - - name: Upload GraphQL schema - uses: actions/upload-artifact@v4 + - uses: depot/setup-action@v1 + - name: Build dependency images + uses: depot/bake-action@v1 with: - name: graphql-schema - path: offchain/schema.graphql - if-no-files-found: error + files: | + ./docker-bake.hcl + ./docker-bake.override.hcl + ./docker-bake.platforms.hcl + targets: | + 
rollups-node-devnet + rollups-node-snapshot + project: ${{ vars.DEPOT_PROJECT }} + workdir: build + load: true + - name: Install Go uses: actions/setup-go@v5 @@ -104,13 +113,13 @@ jobs: version: v1.58.2 - name: Run Go tests - working-directory: ${{ github.workspace }} run: go test ./... - build_docker: + build-docker: runs-on: ubuntu-22.04 needs: - - test + - test-rust + - test-go steps: - uses: actions/checkout@v4 with: @@ -158,8 +167,9 @@ jobs: workdir: build release: - needs: [test, build_docker] runs-on: ubuntu-22.04 + needs: + - build-docker if: startsWith(github.ref, 'refs/tags/v') steps: - uses: actions/checkout@v4 @@ -169,14 +179,9 @@ jobs: - name: Trim CHANGELOG.md run: sed -e '0,/^##[^#]/d' -e '/^##[^#]/,$d' -i CHANGELOG.md - - name: Download GraphQL schema - uses: actions/download-artifact@v4 - with: - name: graphql-schema - - name: Publish Github release uses: softprops/action-gh-release@v2 with: prerelease: true body_path: CHANGELOG.md - files: schema.graphql + files: api/graphql/reader.graphql diff --git a/.github/workflows/rust-code-quality.yml b/.github/workflows/rust-code-quality.yml index c992877b9..cfadfa482 100644 --- a/.github/workflows/rust-code-quality.yml +++ b/.github/workflows/rust-code-quality.yml @@ -8,7 +8,7 @@ jobs: runs-on: ubuntu-latest defaults: run: - working-directory: offchain + working-directory: cmd/authority-claimer steps: - uses: actions/checkout@v4 @@ -22,13 +22,10 @@ jobs: ~/.cargo/registry/index/ ~/.cargo/registry/cache/ ~/.cargo/git/db/ - offchain/target/ + cmd/authority-claimer/target/ key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: ${{ runner.os }}-cargo- - - name: Install protoc - run: sudo apt update && sudo apt install -y protobuf-compiler libprotobuf-dev - - name: Update rust run: rustup update diff --git a/.gitmodules b/.gitmodules index 3940b309f..489ee2306 100644 --- a/.gitmodules +++ b/.gitmodules @@ -1,6 +1,3 @@ -[submodule "grpc-interfaces"] - path = offchain/grpc-interfaces/grpc-interfaces - url = ../grpc-interfaces.git [submodule "rollups-contracts"] path = rollups-contracts url = https://github.com/cartesi/rollups-contracts diff --git a/CHANGELOG.md b/CHANGELOG.md index 2bfbc0444..cae163a2e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,13 @@ All notable changes to this project will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). +## [Unreleased] + +### Removed + +- Removed `advance-runner`, `dispatcher`, `graphql-server`, `host-runner`, `indexer`, `inspect-server`, and `state-server` Rust services +- Removed support for host mode + ## [1.5.0] 2024-07-08 ### Added diff --git a/Makefile b/Makefile index ac3fea1a5..dec80d07e 100644 --- a/Makefile +++ b/Makefile @@ -27,13 +27,8 @@ generate: ## Generate the file that are commited to the repo @go mod tidy @go generate -v ./... -.PHONY: graphql-schema -graphql-schema: ## Generate the graphql schema file - @cd offchain; cargo run --bin generate-schema - @mv offchain/schema.graphql api/graphql/reader.graphql - .PHONY: check-generate -check-generate: generate graphql-schema ## Check whether the generated files are in sync +check-generate: generate ## Check whether the generated files are in sync @echo "Checking differences on the repository..."
@if git diff --exit-code; then \ echo "No differences found."; \ @@ -59,15 +54,6 @@ docker-run: docker-clean ## Run the node with the anvil devnet -f ./build/compose-node.yaml \ up -.PHONY: docker-run-host -docker-run-host: docker-clean ## Run the node in host mode - @docker compose \ - -f ./build/compose-database.yaml \ - -f ./build/compose-devnet.yaml \ - -f ./build/compose-node.yaml \ - -f ./build/compose-host.yaml \ - up - .PHONY: docker-run-sepolia docker-run-sepolia: docker-clean ## Run the node with the sepolia testnet @if [ ! -n "$$RPC_HTTP_URL" ]; then \ diff --git a/build/Dockerfile b/build/Dockerfile index aff835eea..c91bb7df0 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -67,7 +67,6 @@ adduser --system --uid 102 \ cartesi EOF - #################################################################################################### # TARGET: rollups-node-snapshot # @@ -112,7 +111,6 @@ COPY --from=snapshot-builder --chown=cartesi:cartesi ${SNAPSHOT_BUILD_PATH} ${SN # Set dummy command. CMD /bin/bash - #################################################################################################### # TARGET: rollups-node-devnet # @@ -243,7 +241,7 @@ EOF # # This stage prepares the recipe with just the external dependencies. FROM rust-chef as rust-prepare -COPY ./offchain/ . +COPY ./cmd/authority-claimer/ . RUN cargo chef prepare --recipe-path recipe.json # STAGE: rust-builder @@ -269,7 +267,7 @@ COPY --from=rust-prepare ${RUST_BUILD_PATH}/recipe.json . RUN cargo chef cook --release --recipe-path recipe.json # Build application. -COPY ./offchain/ . +COPY ./cmd/authority-claimer/ . RUN cargo build --release # STAGE: go-builder @@ -327,14 +325,7 @@ COPY --from=server-manager /usr/bin/server-manager /usr/bin # Explicitly copy each binary to avoid adding unnecessary files to the runtime image. ARG RUST_BUILD_PATH ARG RUST_TARGET=${RUST_BUILD_PATH}/target/release -COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-advance-runner /usr/bin COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-authority-claimer /usr/bin -COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-dispatcher /usr/bin -COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-graphql-server /usr/bin -COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-host-runner /usr/bin -COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-indexer /usr/bin -COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-inspect-server /usr/bin -COPY --from=rust-builder ${RUST_TARGET}/cartesi-rollups-state-server /usr/bin # Copy Go binary. ARG GO_BUILD_PATH diff --git a/build/compose-host.yaml b/build/compose-host.yaml deleted file mode 100644 index f48bad3e3..000000000 --- a/build/compose-host.yaml +++ /dev/null @@ -1,12 +0,0 @@ -# This compose file contains the config to run the node in host mode. 
- -version: "3.9" - -name: rollups-node -services: - node: - ports: - - "10007:10007" # Host Runner Rollup API - environment: - CARTESI_FEATURE_HOST_MODE: "true" - CARTESI_FEATURE_DISABLE_MACHINE_HASH_CHECK: "true" diff --git a/build/compose-node.yaml b/build/compose-node.yaml index b157b4482..a83d35ae3 100644 --- a/build/compose-node.yaml +++ b/build/compose-node.yaml @@ -13,7 +13,6 @@ services: environment: CARTESI_LOG_LEVEL: "info" CARTESI_LOG_PRETTY: "true" - CARTESI_FEATURE_HOST_MODE: "false" CARTESI_FEATURE_DISABLE_CLAIMER: "false" CARTESI_HTTP_ADDRESS: "0.0.0.0" CARTESI_HTTP_PORT: "10000" diff --git a/offchain/.gitignore b/cmd/authority-claimer/.gitignore similarity index 100% rename from offchain/.gitignore rename to cmd/authority-claimer/.gitignore diff --git a/cmd/authority-claimer/.rustfmt.toml b/cmd/authority-claimer/.rustfmt.toml new file mode 100644 index 000000000..60a370b99 --- /dev/null +++ b/cmd/authority-claimer/.rustfmt.toml @@ -0,0 +1,2 @@ +edition = "2021" +max_width = 80 diff --git a/offchain/Cargo.lock b/cmd/authority-claimer/Cargo.lock similarity index 75% rename from offchain/Cargo.lock rename to cmd/authority-claimer/Cargo.lock index 1c64a709c..22c27ec99 100644 --- a/offchain/Cargo.lock +++ b/cmd/authority-claimer/Cargo.lock @@ -12,219 +12,6 @@ dependencies = [ "regex", ] -[[package]] -name = "actix-codec" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" -dependencies = [ - "bitflags 2.5.0", - "bytes", - "futures-core", - "futures-sink", - "memchr", - "pin-project-lite", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "actix-cors" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9e772b3bcafe335042b5db010ab7c09013dad6eac4915c91d8d50902769f331" -dependencies = [ - "actix-utils", - "actix-web", - "derive_more", - "futures-util", - "log 0.4.21", - "once_cell", - "smallvec", -] - -[[package]] -name = "actix-http" -version = "3.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d223b13fd481fc0d1f83bb12659ae774d9e3601814c68a0bc539731698cca743" -dependencies = [ - "actix-codec", - "actix-rt", - "actix-service", - "actix-utils", - "ahash", - "base64 0.21.7", - "bitflags 2.5.0", - "brotli", - "bytes", - "bytestring", - "derive_more", - "encoding_rs", - "flate2", - "futures-core", - "h2 0.3.25", - "http 0.2.12", - "httparse", - "httpdate", - "itoa", - "language-tags", - "local-channel", - "mime", - "percent-encoding", - "pin-project-lite", - "rand 0.8.5", - "sha1", - "smallvec", - "tokio", - "tokio-util", - "tracing", - "zstd 0.13.1", -] - -[[package]] -name = "actix-macros" -version = "0.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" -dependencies = [ - "quote", - "syn 2.0.55", -] - -[[package]] -name = "actix-router" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d22475596539443685426b6bdadb926ad0ecaefdfc5fb05e5e3441f15463c511" -dependencies = [ - "bytestring", - "http 0.2.12", - "regex", - "serde", - "tracing", -] - -[[package]] -name = "actix-rt" -version = "2.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28f32d40287d3f402ae0028a9d54bef51af15c8769492826a69d28f81893151d" -dependencies = [ - "futures-core", - "tokio", -] - -[[package]] -name = "actix-server" -version = 
"2.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3eb13e7eef0423ea6eab0e59f6c72e7cb46d33691ad56a726b3cd07ddec2c2d4" -dependencies = [ - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "futures-util", - "mio", - "socket2", - "tokio", - "tracing", -] - -[[package]] -name = "actix-service" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b894941f818cfdc7ccc4b9e60fa7e53b5042a2e8567270f9147d5591893373a" -dependencies = [ - "futures-core", - "paste", - "pin-project-lite", -] - -[[package]] -name = "actix-tls" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4cce60a2f2b477bc72e5cde0af1812a6e82d8fd85b5570a5dcf2a5bf2c5be5f" -dependencies = [ - "actix-rt", - "actix-service", - "actix-utils", - "futures-core", - "http 0.2.12", - "http 1.1.0", - "impl-more", - "pin-project-lite", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "actix-utils" -version = "3.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88a1dcdff1466e3c2488e1cb5c36a71822750ad43839937f85d2f4d9f8b705d8" -dependencies = [ - "local-waker", - "pin-project-lite", -] - -[[package]] -name = "actix-web" -version = "4.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43a6556ddebb638c2358714d853257ed226ece6023ef9364f23f0c70737ea984" -dependencies = [ - "actix-codec", - "actix-http", - "actix-macros", - "actix-router", - "actix-rt", - "actix-server", - "actix-service", - "actix-utils", - "actix-web-codegen", - "ahash", - "bytes", - "bytestring", - "cfg-if", - "cookie", - "derive_more", - "encoding_rs", - "futures-core", - "futures-util", - "itoa", - "language-tags", - "log 0.4.21", - "mime", - "once_cell", - "pin-project-lite", - "regex", - "serde", - "serde_json", - "serde_urlencoded", - "smallvec", - "socket2", - "time", - "url", -] - -[[package]] -name = "actix-web-codegen" -version = "4.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb1f50ebbb30eca122b188319a4398b3f7bb4a8cdf50ecfb73bfc6a3c3ce54f5" -dependencies = [ - "actix-router", - "proc-macro2", - "quote", - "syn 2.0.55", -] - [[package]] name = "addr2line" version = "0.21.0" @@ -240,32 +27,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "advance-runner" -version = "1.4.0" -dependencies = [ - "backoff", - "clap", - "env_logger", - "grpc-interfaces", - "hex", - "http-health-check", - "log 1.4.0", - "rand 0.8.5", - "rollups-events", - "sha3", - "snafu 0.8.2", - "tempfile", - "test-fixtures", - "test-log", - "testcontainers", - "tokio", - "tonic", - "tracing", - "tracing-subscriber", - "uuid 1.8.0", -] - [[package]] name = "aes" version = "0.8.4" @@ -277,19 +38,6 @@ dependencies = [ "cpufeatures", ] -[[package]] -name = "ahash" -version = "0.8.11" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" -dependencies = [ - "cfg-if", - "getrandom 0.2.12", - "once_cell", - "version_check", - "zerocopy", -] - [[package]] name = "aho-corasick" version = "1.1.3" @@ -299,21 +47,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "alloc-no-stdlib" -version = "2.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" - -[[package]] -name = "alloc-stdlib" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" -dependencies = [ - "alloc-no-stdlib", -] - [[package]] name = "android-tzdata" version = "0.1.1" @@ -395,12 +128,6 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "96d30a06541fbafbc7f82ed10c06164cfbd2c401138f6addd8404629c4b16711" -[[package]] -name = "ascii" -version = "0.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eab1c04a571841102f5345a8fc0f6bb3d31c315dec879b5c6e42e40ce7ffa34e" - [[package]] name = "ascii-canvas" version = "3.0.0" @@ -418,7 +145,7 @@ checksum = "30c5ef0ede93efbf733c1a727f3b6b5a1060bbedd5600183e66f6e4be4af0ec5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -440,7 +167,7 @@ checksum = "16e62a023e7c117e27523144c5d2459f4397fcc3cab0085af8e2224f643a0193" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -451,7 +178,7 @@ checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -470,17 +197,20 @@ name = "authority-claimer" version = "1.4.0" dependencies = [ "async-trait", + "axum", "backoff", + "base64 0.22.0", "clap", - "contracts", + "eth-state-fold", + "eth-state-fold-types", "eth-tx-manager", "ethabi", "ethers", "ethers-signers", - "http-server", - "log 1.4.0", - "redacted", - "rollups-events", + "hex", + "prometheus-client", + "redis", + "reqwest", "rusoto_core", "rusoto_kms", "rusoto_sts", @@ -488,12 +218,12 @@ dependencies = [ "serde_json", "serial_test", "snafu 0.8.2", - "test-fixtures", + "tempfile", "testcontainers", "tokio", "tracing", + "tracing-subscriber", "tracing-test", - "types", "url", ] @@ -517,7 +247,7 @@ checksum = "3c87f3f15e7794432337fc718554eaa4dc8f04c9677a950ffe366f20a162ae42" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -526,67 +256,6 @@ version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" -[[package]] -name = "awc" -version = "3.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68c09cc97310b926f01621faee652f3d1b0962545a3cec6c9ac07def9ea36c2c" -dependencies = [ - "actix-codec", - "actix-http", - "actix-rt", - "actix-service", - "actix-tls", - "actix-utils", - "base64 0.21.7", - "bytes", - "cfg-if", - "cookie", - "derive_more", - "futures-core", - "futures-util", - "h2 0.3.25", - "http 0.2.12", - "itoa", - "log 0.4.21", - "mime", - "percent-encoding", - "pin-project-lite", - "rand 0.8.5", - "serde", - "serde_json", - "serde_urlencoded", - "tokio", -] - -[[package]] -name = "axum" -version = "0.6.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf" -dependencies = [ - "async-trait", - "axum-core 0.3.4", - "bitflags 1.3.2", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "itoa", - "matchit", - "memchr", - "mime", - "percent-encoding", - "pin-project-lite", - "rustversion", - "serde", - "sync_wrapper 0.1.2", - "tower", - "tower-layer", - "tower-service", -] - [[package]] name = "axum" version = 
"0.7.5" @@ -594,7 +263,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3a6c9af12842a67734c9a2e355436e5d03b22383ed60cf13cd0c18fbfe3dcbcf" dependencies = [ "async-trait", - "axum-core 0.4.3", + "axum-core", "bytes", "futures-util", "http 1.1.0", @@ -621,23 +290,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "axum-core" -version = "0.3.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c" -dependencies = [ - "async-trait", - "bytes", - "futures-util", - "http 0.2.12", - "http-body 0.4.6", - "mime", - "rustversion", - "tower-layer", - "tower-service", -] - [[package]] name = "axum-core" version = "0.4.3" @@ -666,10 +318,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b62ddb9cb1ec0a098ad4bbf9344d0713fa193ae1a80af55febcff2627b6a00c1" dependencies = [ "futures-core", - "getrandom 0.2.12", + "getrandom", "instant", "pin-project-lite", - "rand 0.8.5", + "rand", "tokio", ] @@ -782,15 +434,6 @@ version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" -[[package]] -name = "bitmaps" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "031043d04099746d8db04daf1fa424b2bc8bd69d92b25962dcde24da39ab64a2" -dependencies = [ - "typenum", -] - [[package]] name = "bitvec" version = "0.17.4" @@ -872,59 +515,12 @@ dependencies = [ "serde_with", ] -[[package]] -name = "brotli" -version = "3.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d640d25bc63c50fb1f0b545ffd80207d2e10a4c965530809b40ba3386825c391" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", - "brotli-decompressor", -] - -[[package]] -name = "brotli-decompressor" -version = "2.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4e2e4afe60d7dd600fdd3de8d0f08c2b7ec039712e3b6137ff98b7004e82de4f" -dependencies = [ - "alloc-no-stdlib", - "alloc-stdlib", -] - [[package]] name = "bs58" version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "771fe0050b883fcc3ea2359b1a96bcfbc090b7116eae7c3c512c7a083fdf23d3" -[[package]] -name = "bson" -version = "1.2.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de0aa578035b938855a710ba58d43cfb4d435f3619f99236fb35922a574d6cb1" -dependencies = [ - "base64 0.13.1", - "chrono", - "hex", - "lazy_static", - "linked-hash-map", - "rand 0.7.3", - "serde", - "serde_json", - "uuid 0.8.2", -] - -[[package]] -name = "built" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38d17f4d6e4dc36d1a02fbedc2753a096848e7c1b0772f7654eab8e2c927dd53" -dependencies = [ - "git2", -] - [[package]] name = "bumpalo" version = "3.15.4" @@ -958,15 +554,6 @@ dependencies = [ "serde", ] -[[package]] -name = "bytestring" -version = "1.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d80203ea6b29df88012294f62733de21cfeab47f17b41af3a38bc30a03ee72" -dependencies = [ - "bytes", -] - [[package]] name = "bzip2" version = "0.4.4" @@ -1080,7 +667,7 @@ dependencies = [ "anstream", "anstyle", "clap_lex", - "strsim 0.11.0", + "strsim 0.11.1", ] [[package]] @@ -1092,7 +679,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -1111,7 +698,7 @@ dependencies = [ "bs58", 
"coins-core", "digest 0.10.7", - "getrandom 0.2.12", + "getrandom", "hmac 0.12.1", "k256", "lazy_static", @@ -1128,11 +715,11 @@ checksum = "2a11892bcac83b4c6e95ab84b5b06c76d9d70ad73548dd07418269c5c7977171" dependencies = [ "bitvec 0.17.4", "coins-bip32", - "getrandom 0.2.12", + "getrandom", "hex", "hmac 0.12.1", "pbkdf2", - "rand 0.8.5", + "rand", "sha2 0.10.8", "thiserror", ] @@ -1164,19 +751,6 @@ version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" -[[package]] -name = "combine" -version = "3.8.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da3da6baa321ec19e1cc41d31bf599f00c783d0517095cdaf0332e3fe8d20680" -dependencies = [ - "ascii", - "byteorder", - "either", - "memchr", - "unreachable", -] - [[package]] name = "combine" version = "4.6.6" @@ -1203,21 +777,6 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "contracts" -version = "1.4.0" -dependencies = [ - "eth-state-fold-types", - "snafu 0.8.2", - "tempfile", -] - -[[package]] -name = "convert_case" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e" - [[package]] name = "convert_case" version = "0.6.0" @@ -1227,17 +786,6 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "cookie" -version = "0.16.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e859cd57d0710d9e06c381b550c06e76992472a8c6d527aecd2fc673dcc231fb" -dependencies = [ - "percent-encoding", - "time", - "version_check", -] - [[package]] name = "core-foundation" version = "0.9.4" @@ -1316,7 +864,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef2b4b23cddf68b89b8f8069890e8c270d54e2d5fe1b143820234805e4cb17ef" dependencies = [ "generic-array 0.14.7", - "rand_core 0.6.4", + "rand_core", "subtle", "zeroize", ] @@ -1392,7 +940,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "978747c1d849a7d2ee5e8adc0159961c48fb7e5db2f06af6723b80123bb53856" dependencies = [ "cfg-if", - "hashbrown 0.14.3", + "hashbrown", "lock_api", "once_cell", "parking_lot_core 0.9.9", @@ -1423,70 +971,11 @@ version = "0.99.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4fb810d30a7c1953f91334de7244731fc3f3c10d7fe163338a35b9f640960321" dependencies = [ - "convert_case 0.4.0", "proc-macro2", "quote", - "rustc_version", "syn 1.0.109", ] -[[package]] -name = "derive_utils" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "532b4c15dccee12c7044f1fcad956e98410860b22231e44a3b827464797ca7bf" -dependencies = [ - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "diesel" -version = "2.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03fc05c17098f21b89bc7d98fe1dd3cce2c11c2ad8e145f2a44fe08ed28eb559" -dependencies = [ - "bitflags 2.5.0", - "byteorder", - "diesel_derives", - "itoa", - "pq-sys", - "r2d2", -] - -[[package]] -name = "diesel_derives" -version = "2.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d02eecb814ae714ffe61ddc2db2dd03e6c49a42e269b5001355500d431cce0c" -dependencies = [ - "diesel_table_macro_syntax", - "proc-macro2", - "quote", - "syn 2.0.55", -] 
- -[[package]] -name = "diesel_migrations" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6036b3f0120c5961381b570ee20a02432d7e2d27ea60de9578799cf9156914ac" -dependencies = [ - "diesel", - "migrations_internals", - "migrations_macros", -] - -[[package]] -name = "diesel_table_macro_syntax" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc5557efc453706fed5e4fa85006fe9817c224c3f480a34c7e5959fd700921c5" -dependencies = [ - "syn 2.0.55", -] - [[package]] name = "diff" version = "0.1.13" @@ -1543,46 +1032,12 @@ dependencies = [ "winapi", ] -[[package]] -name = "dispatcher" -version = "1.4.0" -dependencies = [ - "async-trait", - "backoff", - "clap", - "eth-state-client-lib", - "eth-state-fold-types", - "futures", - "http-server", - "im", - "log 1.4.0", - "rand 0.8.5", - "redis", - "rollups-events", - "serial_test", - "snafu 0.8.2", - "test-fixtures", - "testcontainers", - "tokio", - "tokio-stream", - "tonic", - "tracing", - "tracing-test", - "types", -] - [[package]] name = "doc-comment" version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fea41bba32d969b513997752735605054bc0dfa92b4c56bf1189f2e174be7a10" -[[package]] -name = "downcast" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1435fa1053d8b2fbbe9be7e97eca7f33d37b28409959813daefc1446a14247f1" - [[package]] name = "dtoa" version = "1.0.9" @@ -1627,7 +1082,7 @@ dependencies = [ "generic-array 0.14.7", "group", "pkcs8", - "rand_core 0.6.4", + "rand_core", "sec1", "subtle", "zeroize", @@ -1639,7 +1094,7 @@ version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c533630cf40e9caa44bd91aadc88a75d75a4c3a12b4cfde353cbed41daa1e1f1" dependencies = [ - "log 0.4.21", + "log", ] [[package]] @@ -1651,29 +1106,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "env_filter" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a009aa4810eb158359dda09d0c87378e4bbb89b5a801f016885a4707ba24f7ea" -dependencies = [ - "log 0.4.21", - "regex", -] - -[[package]] -name = "env_logger" -version = "0.11.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38b35839ba51819680ba087cd351788c9a3c476841207e0b8cee0b04722343b9" -dependencies = [ - "anstream", - "anstyle", - "env_filter", - "humantime", - "log 0.4.21", -] - [[package]] name = "equivalent" version = "1.0.1" @@ -1717,32 +1149,14 @@ dependencies = [ "hex", "hmac 0.12.1", "pbkdf2", - "rand 0.8.5", + "rand", "scrypt", "serde", "serde_json", "sha2 0.10.8", "sha3", "thiserror", - "uuid 0.8.2", -] - -[[package]] -name = "eth-state-client-lib" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e40b8c77c0518287f68c2d5fbe9f291aa0787404345fc92796c7ec3ec4ee3a3b" -dependencies = [ - "async-trait", - "clap", - "eth-state-fold-types", - "eth-state-server-common", - "serde", - "serde_json", - "snafu 0.7.5", - "tokio", - "tokio-stream", - "tonic", + "uuid", ] [[package]] @@ -1775,45 +1189,7 @@ dependencies = [ "serde", "serde_json", "snafu 0.7.5", - "toml 0.5.11", -] - -[[package]] -name = "eth-state-server-common" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589cfcf2bb4c3531e7e6bc08e9d00ba874773b3111445c02ab220692db3e3725" -dependencies = [ - "anyhow", - "eth-state-fold-types", - "prost", - "serde", - "serde_json", - "snafu 
0.7.5", - "tonic", - "tonic-build", -] - -[[package]] -name = "eth-state-server-lib" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f0141b54e46461538cec27aa45757a679cab0bd0e7c26fa27bfc3b7001488f1" -dependencies = [ - "clap", - "eth-block-history", - "eth-state-fold", - "eth-state-fold-types", - "eth-state-server-common", - "futures", - "serde", - "serde_json", - "snafu 0.7.5", - "tokio", - "tokio-stream", - "tonic", - "tonic-health", - "tracing", + "toml", ] [[package]] @@ -1826,7 +1202,7 @@ dependencies = [ "async-trait", "clap", "ethers", - "reqwest 0.11.24", + "reqwest", "serde", "serde_json", "thiserror", @@ -1940,16 +1316,16 @@ dependencies = [ "dunce", "ethers-core", "eyre", - "getrandom 0.2.12", + "getrandom", "hex", "proc-macro2", "quote", "regex", - "reqwest 0.11.24", + "reqwest", "serde", "serde_json", "syn 1.0.109", - "toml 0.5.11", + "toml", "url", "walkdir", ] @@ -1979,7 +1355,7 @@ dependencies = [ "bytes", "cargo_metadata", "chrono", - "convert_case 0.6.0", + "convert_case", "elliptic-curve", "ethabi", "generic-array 0.14.7", @@ -1988,7 +1364,7 @@ dependencies = [ "once_cell", "open-fastrlp", "proc-macro2", - "rand 0.8.5", + "rand", "rlp", "rlp-derive", "serde", @@ -2007,8 +1383,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a9713f525348e5dde025d09b0a4217429f8074e8ff22c886263cc191e87d8216" dependencies = [ "ethers-core", - "getrandom 0.2.12", - "reqwest 0.11.24", + "getrandom", + "reqwest", "semver", "serde", "serde-aux", @@ -2033,7 +1409,7 @@ dependencies = [ "futures-locks", "futures-util", "instant", - "reqwest 0.11.24", + "reqwest", "serde", "serde_json", "thiserror", @@ -2057,14 +1433,14 @@ dependencies = [ "futures-core", "futures-timer", "futures-util", - "getrandom 0.2.12", + "getrandom", "hashers", "hex", "http 0.2.12", "once_cell", "parking_lot 0.11.2", "pin-project", - "reqwest 0.11.24", + "reqwest", "serde", "serde_json", "thiserror", @@ -2093,7 +1469,7 @@ dependencies = [ "eth-keystore", "ethers-core", "hex", - "rand 0.8.5", + "rand", "rusoto_core", "rusoto_kms", "sha2 0.10.8", @@ -2111,7 +1487,7 @@ dependencies = [ "cfg-if", "dunce", "ethers-core", - "getrandom 0.2.12", + "getrandom", "glob", "hex", "home", @@ -2162,7 +1538,7 @@ version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d013fc25338cc558c5c2cfbad646908fb23591e2404481826742b651c9af7160" dependencies = [ - "rand_core 0.6.4", + "rand_core", "subtle", ] @@ -2173,7 +1549,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "835c052cb0c08c1acf6ffd71c022172e18723949c8282f2b9f27efbc51e64534" dependencies = [ "byteorder", - "rand 0.8.5", + "rand", "rustc-hex", "static_assertions", ] @@ -2224,12 +1600,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "fragile" -version = "2.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c2141d6d6c8512188a7891b4b01590a45f6dac67afb4f255c4124dbb86d4eaa" - [[package]] name = "fs2" version = "0.4.3" @@ -2277,17 +1647,6 @@ version = "0.3.30" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dfc6580bb841c5a68e9ef15c77ccc837b40a7504914d52e47b8b0e9bbda25a1d" -[[package]] -name = "futures-enum" -version = "0.1.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3422d14de7903a52e9dbc10ae05a7e14445ec61890100e098754e120b2bd7b1e" -dependencies = [ - "derive_utils", - "quote", - "syn 1.0.109", -] - [[package]] name = 
"futures-executor" version = "0.3.30" @@ -2323,7 +1682,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -2390,17 +1749,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "getrandom" -version = "0.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" -dependencies = [ - "cfg-if", - "libc", - "wasi 0.9.0+wasi-snapshot-preview1", -] - [[package]] name = "getrandom" version = "0.2.12" @@ -2410,7 +1758,7 @@ dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "wasm-bindgen", ] @@ -2420,58 +1768,12 @@ version = "0.28.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4271d37baee1b8c7e4b708028c57d816cf9d2434acb33a549475f78c181f6253" -[[package]] -name = "git2" -version = "0.18.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "232e6a7bfe35766bf715e55a88b39a700596c0ccfd88cd3680b4cdb40d66ef70" -dependencies = [ - "bitflags 2.5.0", - "libc", - "libgit2-sys", - "log 0.4.21", - "url", -] - [[package]] name = "glob" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d2fabcfbdc87f4758337ca535fb41a6d701b65693ce38287d856d1674551ec9b" -[[package]] -name = "graphql-parser" -version = "0.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1abd4ce5247dfc04a03ccde70f87a048458c9356c7e41d21ad8c407b3dde6f2" -dependencies = [ - "combine 3.8.1", - "thiserror", -] - -[[package]] -name = "graphql-server" -version = "1.4.0" -dependencies = [ - "actix-cors", - "actix-web", - "awc", - "clap", - "hex", - "http-health-check", - "juniper", - "log 1.4.0", - "rollups-data", - "serde", - "serde_json", - "serial_test", - "snafu 0.8.2", - "test-fixtures", - "testcontainers", - "tokio", - "tracing", -] - [[package]] name = "group" version = "0.12.1" @@ -2479,24 +1781,15 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5dfbfb3a6cfbd390d5c9564ab283a0349b9b9fcd46a706c1eb10e0db70bfbac7" dependencies = [ "ff", - "rand_core 0.6.4", + "rand_core", "subtle", ] -[[package]] -name = "grpc-interfaces" -version = "1.4.0" -dependencies = [ - "prost", - "tonic", - "tonic-build", -] - [[package]] name = "h2" -version = "0.3.25" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbd2820c5e49886948654ab546d0688ff24530286bdcf8fca3cefb16d4618eb" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", @@ -2504,38 +1797,13 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.2.6", - "slab", - "tokio", - "tokio-util", - "tracing", -] - -[[package]] -name = "h2" -version = "0.4.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51ee2dd2e4f378392eeff5d51618cd9a63166a2513846bbc55f21cfacd9199d4" -dependencies = [ - "bytes", - "fnv", - "futures-core", - "futures-sink", - "futures-util", - "http 1.1.0", - "indexmap 2.2.6", + "indexmap", "slab", "tokio", "tokio-util", "tracing", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.3" @@ -2603,35 +1871,6 @@ dependencies = 
[ "windows-sys 0.52.0", ] -[[package]] -name = "host-runner" -version = "1.4.0" -dependencies = [ - "actix-web", - "async-trait", - "byteorder", - "clap", - "ethabi", - "futures-util", - "grpc-interfaces", - "hex", - "http-health-check", - "log 1.4.0", - "mockall", - "rand 0.8.5", - "reqwest 0.12.2", - "rollups-http-client", - "serde", - "serial_test", - "sha3", - "snafu 0.8.2", - "tokio", - "tonic", - "tonic-health", - "tracing", - "tracing-test", -] - [[package]] name = "http" version = "0.2.12" @@ -2688,28 +1927,6 @@ dependencies = [ "pin-project-lite", ] -[[package]] -name = "http-health-check" -version = "1.4.0" -dependencies = [ - "axum 0.7.5", - "snafu 0.8.2", - "tokio", - "tracing", -] - -[[package]] -name = "http-server" -version = "1.4.0" -dependencies = [ - "axum 0.7.5", - "clap", - "hyper 0.14.28", - "prometheus-client", - "tokio", - "tracing", -] - [[package]] name = "httparse" version = "1.8.0" @@ -2722,12 +1939,6 @@ version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "df3b46402a9d5adb4c86a0cf463f42e19994e3ee891101b1841f30a545cb49a9" -[[package]] -name = "humantime" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a3a5bfb195931eeb336b2a7b4d761daec841b97f947d34394601737a7bba5e4" - [[package]] name = "hyper" version = "0.14.28" @@ -2738,7 +1949,7 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.25", + "h2", "http 0.2.12", "http-body 0.4.6", "httparse", @@ -2761,7 +1972,6 @@ dependencies = [ "bytes", "futures-channel", "futures-util", - "h2 0.4.3", "http 1.1.0", "http-body 1.0.0", "httparse", @@ -2770,7 +1980,6 @@ dependencies = [ "pin-project-lite", "smallvec", "tokio", - "want", ] [[package]] @@ -2787,18 +1996,6 @@ dependencies = [ "tokio-rustls 0.24.1", ] -[[package]] -name = "hyper-timeout" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" -dependencies = [ - "hyper 0.14.28", - "pin-project-lite", - "tokio", - "tokio-io-timeout", -] - [[package]] name = "hyper-tls" version = "0.5.0" @@ -2812,22 +2009,6 @@ dependencies = [ "tokio-native-tls", ] -[[package]] -name = "hyper-tls" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70206fc6890eaca9fde8a0bf71caa2ddfc9fe045ac9e5c70df101a7dbde866e0" -dependencies = [ - "bytes", - "http-body-util", - "hyper 1.2.0", - "hyper-util", - "native-tls", - "tokio", - "tokio-native-tls", - "tower-service", -] - [[package]] name = "hyper-util" version = "0.1.3" @@ -2835,7 +2016,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ca38ef113da30126bbff9cd1705f9273e15d45498615d138b0c20279ac7a76aa" dependencies = [ "bytes", - "futures-channel", "futures-util", "http 1.1.0", "http-body 1.0.0", @@ -2843,9 +2023,6 @@ dependencies = [ "pin-project-lite", "socket2", "tokio", - "tower", - "tower-service", - "tracing", ] [[package]] @@ -2887,21 +2064,6 @@ dependencies = [ "unicode-normalization", ] -[[package]] -name = "im" -version = "15.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0acd33ff0285af998aaf9b57342af478078f53492322fafc47450e09397e0e9" -dependencies = [ - "bitmaps", - "rand_core 0.6.4", - "rand_xoshiro", - "serde", - "sized-chunks", - "typenum", - "version_check", -] - [[package]] name = "impl-codec" version = "0.6.0" @@ -2911,12 +2073,6 @@ dependencies = [ "parity-scale-codec", ] -[[package]] 
-name = "impl-more" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "206ca75c9c03ba3d4ace2460e57b189f39f43de612c2f85836e65c929701bb2d" - [[package]] name = "impl-rlp" version = "0.3.0" @@ -2952,39 +2108,6 @@ version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" -[[package]] -name = "indexer" -version = "1.4.0" -dependencies = [ - "backoff", - "clap", - "env_logger", - "http-health-check", - "log 1.4.0", - "rand 0.8.5", - "rollups-data", - "rollups-events", - "serial_test", - "snafu 0.8.2", - "test-fixtures", - "test-log", - "testcontainers", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", - "serde", -] - [[package]] name = "indexmap" version = "2.2.6" @@ -2992,7 +2115,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", - "hashbrown 0.14.3", + "hashbrown", ] [[package]] @@ -3004,30 +2127,6 @@ dependencies = [ "generic-array 0.14.7", ] -[[package]] -name = "inspect-server" -version = "1.4.0" -dependencies = [ - "actix-cors", - "actix-web", - "clap", - "futures", - "grpc-interfaces", - "hex", - "http-health-check", - "log 1.4.0", - "reqwest 0.12.2", - "serde", - "serial_test", - "snafu 0.8.2", - "tokio", - "toml 0.8.12", - "tonic", - "tracing", - "tracing-actix-web", - "uuid 1.8.0", -] - [[package]] name = "instant" version = "0.1.12" @@ -3090,46 +2189,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "json" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "078e285eafdfb6c4b434e0d31e8cfcb5115b651496faca5749b88fafd4f23bfd" - -[[package]] -name = "juniper" -version = "0.15.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "875dca5a0c08b1521e1bb0ed940e9955a9f38971008aaa2a9f64a2ac6b59e1b5" -dependencies = [ - "async-trait", - "bson", - "chrono", - "fnv", - "futures", - "futures-enum", - "graphql-parser", - "indexmap 1.9.3", - "juniper_codegen", - "serde", - "smartstring", - "static_assertions", - "url", - "uuid 0.8.2", -] - -[[package]] -name = "juniper_codegen" -version = "0.15.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aee97671061ad50301ba077d054d295e01d31a1868fbd07902db651f987e71db" -dependencies = [ - "proc-macro-error", - "proc-macro2", - "quote", - "syn 1.0.109", -] - [[package]] name = "k256" version = "0.11.6" @@ -3183,12 +2242,6 @@ dependencies = [ "regex", ] -[[package]] -name = "language-tags" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4345964bb142484797b161f473a503a434de77149dd8c7427788c6e13379388" - [[package]] name = "lazy_static" version = "1.4.0" @@ -3202,68 +2255,20 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c198f91728a82281a64e1f4f9eeb25d82cb32a5de251c6bd1b5154d63a8e7bd" [[package]] -name = "libgit2-sys" -version = "0.16.2+1.7.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee4126d8b4ee5c9d9ea891dd875cfdc1e9d0950437179104b183d7d8a74d24e8" -dependencies = [ - "cc", - "libc", - "libz-sys", - 
"pkg-config", -] - -[[package]] -name = "libredox" -version = "0.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85c833ca1e66078851dba29046874e38f08b2c883700aa29a03ddd3b23814ee8" -dependencies = [ - "bitflags 2.5.0", - "libc", - "redox_syscall 0.4.1", -] - -[[package]] -name = "libz-sys" -version = "1.1.16" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - -[[package]] -name = "linux-raw-sys" -version = "0.4.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" - -[[package]] -name = "local-channel" -version = "0.1.5" +name = "libredox" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6cbc85e69b8df4b8bb8b89ec634e7189099cea8927a276b7384ce5488e53ec8" +checksum = "c0ff37bd590ca25063e35af745c343cb7a0271906fb7b37e4813e8f79f00268d" dependencies = [ - "futures-core", - "futures-sink", - "local-waker", + "bitflags 2.5.0", + "libc", ] [[package]] -name = "local-waker" -version = "0.1.4" +name = "linux-raw-sys" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" +checksum = "01cda141df6706de531b6c46c3a33ecca755538219bd484262fa09410c13539c" [[package]] name = "lock_api" @@ -3281,16 +2286,6 @@ version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" -[[package]] -name = "log" -version = "1.4.0" -dependencies = [ - "built", - "clap", - "tracing", - "tracing-subscriber", -] - [[package]] name = "matchers" version = "0.1.0" @@ -3333,27 +2328,6 @@ version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" -[[package]] -name = "migrations_internals" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f23f71580015254b020e856feac3df5878c2c7a8812297edd6c0a485ac9dada" -dependencies = [ - "serde", - "toml 0.7.8", -] - -[[package]] -name = "migrations_macros" -version = "2.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cce3325ac70e67bbab5bd837a31cae01f1a6db64e0e744a33cb03a543469ef08" -dependencies = [ - "migrations_internals", - "proc-macro2", - "quote", -] - [[package]] name = "mime" version = "0.3.17" @@ -3376,50 +2350,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", - "log 0.4.21", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi", "windows-sys 0.48.0", ] -[[package]] -name = "mockall" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43766c2b5203b10de348ffe19f7e54564b64f3d6018ff7648d1e2d6d3a0f0a48" -dependencies = [ - "cfg-if", - "downcast", - "fragile", - "lazy_static", - "mockall_derive", - "predicates", - "predicates-tree", -] - -[[package]] -name = "mockall_derive" -version = "0.12.1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "af7cbce79ec385a1d4f54baa90a76401eb15d9cab93685f62e7e9f942aa00ae2" -dependencies = [ - "cfg-if", - "proc-macro2", - "quote", - "syn 2.0.55", -] - -[[package]] -name = "multimap" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" - -[[package]] -name = "mutually_exclusive_features" -version = "0.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d02c0b00610773bb7fc61d85e13d86c7858cbdf00e1a120bfc41bc055dbaa0e" - [[package]] name = "native-tls" version = "0.2.11" @@ -3428,7 +2362,7 @@ checksum = "07226173c32f2926027b63cce4bcd8076c3552846cbe7925f3aaffeac0a3b92e" dependencies = [ "lazy_static", "libc", - "log 0.4.21", + "log", "openssl", "openssl-probe", "openssl-sys", @@ -3554,7 +2488,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -3565,9 +2499,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.101" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dda2b0f344e78efc2facf7d195d098df0dd72151b26ab98da807afc26c198dff" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -3601,7 +2535,7 @@ version = "3.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be30eaf4b0a9fba5336683b38de57bb86d179a35862ba6bfcf57625d006bde5b" dependencies = [ - "proc-macro-crate 2.0.0", + "proc-macro-crate 2.0.2", "proc-macro2", "quote", "syn 1.0.109", @@ -3662,16 +2596,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7676374caaee8a325c9e7a2ae557f216c5563a171d6997b0ef8a65af35147700" dependencies = [ "base64ct", - "rand_core 0.6.4", + "rand_core", "subtle", ] -[[package]] -name = "paste" -version = "1.0.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c" - [[package]] name = "path-slash" version = "0.2.1" @@ -3703,7 +2631,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.6", + "indexmap", ] [[package]] @@ -3734,7 +2662,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5d5285893bb5eb82e6aaf5d59ee909a06a16737a8970984dd7746ba9283498d6" dependencies = [ "phf_shared", - "rand 0.8.5", + "rand", ] [[package]] @@ -3777,14 +2705,14 @@ checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -3820,57 +2748,12 @@ version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5b40af805b3121feab8a3c29f04d8ad262fa8e0561883e7653e024ae4479e6de" -[[package]] -name = "pq-sys" -version = "0.4.8" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "31c0052426df997c0cbd30789eb44ca097e3541717a7b8fa36b1c464ee7edebd" -dependencies = [ - "vcpkg", -] - [[package]] name = "precomputed-hash" version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c" -[[package]] -name = "predicates" -version = "3.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68b87bfd4605926cdfefc1c3b5f8fe560e3feca9d5552cf68c466d3d8236c7e8" -dependencies = [ - "anstyle", - "predicates-core", -] - -[[package]] -name = "predicates-core" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b794032607612e7abeb4db69adb4e33590fa6cf1149e95fd7cb00e634b92f174" - -[[package]] -name = "predicates-tree" -version = "1.0.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "368ba315fb8c5052ab692e68a0eefec6ec57b23a36959c14496f0b0df2c0cecf" -dependencies = [ - "predicates-core", - "termtree", -] - -[[package]] -name = "prettyplease" -version = "0.1.25" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c8646e95016a7a6c4adea95bafa8a16baab64b583356217f2c85db4a39d9a86" -dependencies = [ - "proc-macro2", - "syn 1.0.109", -] - [[package]] name = "primitive-types" version = "0.12.2" @@ -3897,11 +2780,12 @@ dependencies = [ [[package]] name = "proc-macro-crate" -version = "2.0.0" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e8366a6159044a37876a2b9817124296703c586a5c92e2c53751fa06d8d43e8" +checksum = "b00f26d3400549137f92511a46ac1cd8ce37cb5598a96d382381458b992a5d24" dependencies = [ - "toml_edit 0.20.7", + "toml_datetime", + "toml_edit 0.20.2", ] [[package]] @@ -3963,61 +2847,7 @@ checksum = "440f724eba9f6996b75d63681b0a92b06947f1457076d503a4d2e2c8f56442b8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", -] - -[[package]] -name = "prost" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b82eaa1d779e9a4bc1c3217db8ffbeabaae1dca241bf70183242128d48681cd" -dependencies = [ - "bytes", - "prost-derive", -] - -[[package]] -name = "prost-build" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "119533552c9a7ffacc21e099c24a0ac8bb19c2a2a3f363de84cd9b844feab270" -dependencies = [ - "bytes", - "heck 0.4.1", - "itertools", - "lazy_static", - "log 0.4.21", - "multimap", - "petgraph", - "prettyplease", - "prost", - "prost-types", - "regex", - "syn 1.0.109", - "tempfile", - "which", -] - -[[package]] -name = "prost-derive" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5d2d8d10f3c6ded6da8b05b5fb3b8a5082514344d56c9f871412d29b4e075b4" -dependencies = [ - "anyhow", - "itertools", - "proc-macro2", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "prost-types" -version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213622a1460818959ac1181aaeb2dc9c7f63df720db7d788b3e24eacd1983e13" -dependencies = [ - "prost", + "syn 2.0.58", ] [[package]] @@ -4029,17 +2859,6 @@ dependencies = [ "proc-macro2", ] -[[package]] -name = "r2d2" -version = "0.8.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51de85fb3fb6524929c8a2eb85e6b6d363de4e8c48f9e2c2eac4944abc181c93" -dependencies = [ - "log 0.4.21", - "parking_lot 0.12.1", - 
"scheduled-thread-pool", -] - [[package]] name = "radium" version = "0.3.0" @@ -4052,19 +2871,6 @@ version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dc33ff2d4973d518d823d61aa239014831e521c75da58e3df4840d3f47749d09" -[[package]] -name = "rand" -version = "0.7.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a6b1679d49b24bbfe0c803429aa1874472f50d9b363131f0e89fc356b544d03" -dependencies = [ - "getrandom 0.1.16", - "libc", - "rand_chacha 0.2.2", - "rand_core 0.5.1", - "rand_hc", -] - [[package]] name = "rand" version = "0.8.5" @@ -4072,18 +2878,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", - "rand_chacha 0.3.1", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_chacha" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4c8ed856279c9737206bf725bf36935d8666ead7aa69b52be55af369d193402" -dependencies = [ - "ppv-lite86", - "rand_core 0.5.1", + "rand_chacha", + "rand_core", ] [[package]] @@ -4093,16 +2889,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.4", -] - -[[package]] -name = "rand_core" -version = "0.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90bde5296fc891b0cef12a6d03ddccc162ce7b2aff54160af9338f8d40df6d19" -dependencies = [ - "getrandom 0.1.16", + "rand_core", ] [[package]] @@ -4111,25 +2898,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.12", -] - -[[package]] -name = "rand_hc" -version = "0.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca3129af7b92a17112d59ad498c6f81eaf463253766b90396d39ea7a39d6613c" -dependencies = [ - "rand_core 0.5.1", -] - -[[package]] -name = "rand_xoshiro" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f97cdb2a36ed4183de61b2f824cc45c9f1037f28afe0a322e9fff4c108b5aaa" -dependencies = [ - "rand_core 0.6.4", + "getrandom", ] [[package]] @@ -4152,32 +2921,25 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "redacted" -version = "1.4.0" -dependencies = [ - "url", -] - [[package]] name = "redis" -version = "0.25.2" +version = "0.25.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d64e978fd98a0e6b105d066ba4889a7301fca65aeac850a877d8797343feeb" +checksum = "6472825949c09872e8f2c50bde59fcefc17748b6be5c90fd67cd8b4daca73bfd" dependencies = [ "arc-swap", "async-trait", "bytes", - "combine 4.6.6", + "combine", "crc16", "futures", "futures-util", "itoa", - "log 0.4.21", + "log", "native-tls", "percent-encoding", "pin-project-lite", - "rand 0.8.5", + "rand", "ryu", "sha1_smol", "socket2", @@ -4208,11 +2970,11 @@ dependencies = [ [[package]] name = "redox_users" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a18479200779601e498ada4e8c1e1f50e3ee19deb0259c25825a98b5603b2cb4" +checksum = "bd283d9651eeda4b2a83a43c1c91b266c40fd76ecd39a50a8c630ae69dc72891" dependencies = [ - "getrandom 0.2.12", + "getrandom", "libredox", "thiserror", ] @@ -4272,15 +3034,15 @@ dependencies = [ "encoding_rs", 
"futures-core", "futures-util", - "h2 0.3.25", + "h2", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.28", "hyper-rustls", - "hyper-tls 0.5.0", + "hyper-tls", "ipnet", "js-sys", - "log 0.4.21", + "log", "mime", "native-tls", "once_cell", @@ -4305,48 +3067,6 @@ dependencies = [ "winreg", ] -[[package]] -name = "reqwest" -version = "0.12.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d66674f2b6fb864665eea7a3c1ac4e3dfacd2fda83cf6f935a612e01b0e3338" -dependencies = [ - "base64 0.21.7", - "bytes", - "encoding_rs", - "futures-core", - "futures-util", - "h2 0.4.3", - "http 1.1.0", - "http-body 1.0.0", - "http-body-util", - "hyper 1.2.0", - "hyper-tls 0.6.0", - "hyper-util", - "ipnet", - "js-sys", - "log 0.4.21", - "mime", - "native-tls", - "once_cell", - "percent-encoding", - "pin-project-lite", - "rustls-pemfile", - "serde", - "serde_json", - "serde_urlencoded", - "sync_wrapper 0.1.2", - "system-configuration", - "tokio", - "tokio-native-tls", - "tower-service", - "url", - "wasm-bindgen", - "wasm-bindgen-futures", - "web-sys", - "winreg", -] - [[package]] name = "rfc6979" version = "0.3.1" @@ -4381,7 +3101,7 @@ checksum = "c17fa4cb658e3583423e915b9f3acc01cceaee1860e33d59ebae66adc3a2dc0d" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.12", + "getrandom", "libc", "spin 0.9.8", "untrusted 0.9.0", @@ -4418,59 +3138,6 @@ dependencies = [ "syn 1.0.109", ] -[[package]] -name = "rollups-data" -version = "1.4.0" -dependencies = [ - "backoff", - "base64 0.22.0", - "clap", - "diesel", - "diesel_migrations", - "env_logger", - "redacted", - "serial_test", - "snafu 0.8.2", - "tempfile", - "test-fixtures", - "test-log", - "testcontainers", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "rollups-events" -version = "1.4.0" -dependencies = [ - "backoff", - "base64 0.22.0", - "clap", - "env_logger", - "hex", - "prometheus-client", - "redacted", - "redis", - "serde", - "serde_json", - "snafu 0.8.2", - "test-log", - "testcontainers", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "rollups-http-client" -version = "1.4.0" -dependencies = [ - "hyper 0.14.28", - "serde", - "serde_json", - "tracing", -] - [[package]] name = "rusoto_core" version = "0.48.0" @@ -4484,9 +3151,9 @@ dependencies = [ "futures", "http 0.2.12", "hyper 0.14.28", - "hyper-tls 0.5.0", + "hyper-tls", "lazy_static", - "log 0.4.21", + "log", "rusoto_credential", "rusoto_signature", "rustc_version", @@ -4543,7 +3210,7 @@ dependencies = [ "hmac 0.11.0", "http 0.2.12", "hyper 0.14.28", - "log 0.4.21", + "log", "md-5 0.9.1", "percent-encoding", "pin-project-lite", @@ -4609,7 +3276,7 @@ version = "0.20.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b80e3dec595989ea8510028f30c408a4630db12c9cbb8de34203b89d6577e99" dependencies = [ - "log 0.4.21", + "log", "ring 0.16.20", "sct", "webpki", @@ -4621,7 +3288,7 @@ version = "0.21.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d5a6813c0759e4609cd494e8e725babae6a2ca7b62a5536a13daaec6fcb7ba" dependencies = [ - "log 0.4.21", + "log", "ring 0.17.8", "rustls-webpki", "sct", @@ -4709,15 +3376,6 @@ dependencies = [ "windows-sys 0.52.0", ] -[[package]] -name = "scheduled-thread-pool" -version = "0.2.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cbc66816425a074528352f5789333ecff06ca41b36b0b0efdfbb29edc391a19" -dependencies = [ - "parking_lot 0.12.1", -] - [[package]] name = "scopeguard" version = "1.2.0" @@ -4762,9 +3420,9 @@ dependencies 
= [ [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -4775,9 +3433,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -4825,7 +3483,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -4834,7 +3492,6 @@ version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ - "indexmap 2.2.6", "itoa", "ryu", "serde", @@ -4850,15 +3507,6 @@ dependencies = [ "serde", ] -[[package]] -name = "serde_spanned" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb3622f419d1296904700073ea6cc23ad690adbd66f13ea683df73298736f0c1" -dependencies = [ - "serde", -] - [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -4902,7 +3550,7 @@ dependencies = [ "dashmap", "futures", "lazy_static", - "log 0.4.21", + "log", "parking_lot 0.12.1", "serial_test_derive", ] @@ -4915,7 +3563,7 @@ checksum = "b93fb4adc70021ac1b47f7d45e8cc4169baaa7ea58483bc5b721d19a26202212" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5023,7 +3671,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" dependencies = [ "digest 0.10.7", - "rand_core 0.6.4", + "rand_core", ] [[package]] @@ -5032,16 +3680,6 @@ version = "0.3.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "38b58827f4464d87d377d175e90bf58eb00fd8716ff0a62f80356b5e61555d0d" -[[package]] -name = "sized-chunks" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "16d69225bde7a69b235da73377861095455d298f2b970996eec25ddbb42b3d1e" -dependencies = [ - "bitmaps", - "typenum", -] - [[package]] name = "slab" version = "0.4.9" @@ -5057,17 +3695,6 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" -[[package]] -name = "smartstring" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29" -dependencies = [ - "autocfg", - "static_assertions", - "version_check", -] - [[package]] name = "snafu" version = "0.7.5" @@ -5108,7 +3735,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5156,25 +3783,6 @@ dependencies = [ "der", ] -[[package]] -name = "state-server" -version = "1.4.0" -dependencies = [ - "clap", - "eth-block-history", - "eth-state-fold", - "eth-state-fold-types", - "eth-state-server-lib", - "log 1.4.0", - "serde", - "snafu 0.8.2", - "tokio", - "tonic", - "tracing", - "types", - "url", -] - 
[[package]] name = "static_assertions" version = "1.1.0" @@ -5202,9 +3810,9 @@ checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" [[package]] name = "strsim" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ee073c9e4cd00e28217186dbe12796d692868f432bf2e97ee73bed0c56dfa01" +checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "strum" @@ -5244,7 +3852,7 @@ dependencies = [ "hex", "home", "once_cell", - "reqwest 0.11.24", + "reqwest", "semver", "serde", "serde_json", @@ -5267,9 +3875,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.55" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "002a1b3dbf967edfafc32655d0f377ab0bb7b994aa1d32c8cc7e9b8bf3ebb8f0" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -5338,53 +3946,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "termtree" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3369f5ac52d5eb6ab48c6b4ffdc8efbcad6b89c765749064ba298f2c68a16a76" - -[[package]] -name = "test-fixtures" -version = "1.4.0" -dependencies = [ - "anyhow", - "backoff", - "grpc-interfaces", - "hyper 0.14.28", - "json", - "rollups-data", - "rollups-events", - "tempfile", - "testcontainers", - "tokio", - "tonic", - "tracing", - "users", -] - -[[package]] -name = "test-log" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b319995299c65d522680decf80f2c108d85b861d81dfe340a10d16cee29d9e6" -dependencies = [ - "env_logger", - "test-log-macros", - "tracing-subscriber", -] - -[[package]] -name = "test-log-macros" -version = "0.2.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8f546451eaa38373f549093fe9fd05e7d2bade739e2ddf834b9968621d60107" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.55", -] - [[package]] name = "testcontainers" version = "0.14.0" @@ -5395,8 +3956,8 @@ dependencies = [ "futures", "hex", "hmac 0.12.1", - "log 0.4.21", - "rand 0.8.5", + "log", + "rand", "serde", "serde_json", "sha2 0.10.8", @@ -5419,7 +3980,7 @@ checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5439,12 +4000,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", - "itoa", "num-conv", "powerfmt", "serde", "time-core", - "time-macros", ] [[package]] @@ -5453,16 +4012,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ef927ca75afb808a4d64dd374f00a2adf8d0fcff8e7b184af886c3c87ec4a3f3" -[[package]] -name = "time-macros" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ba3a3ef41e6672a2f0f001392bb5dcd3ff0a9992d618ca761a11c3121547774" -dependencies = [ - "num-conv", - "time-core", -] - [[package]] name = "tiny-keccak" version = "2.0.2" @@ -5498,7 +4047,6 @@ dependencies = [ "libc", "mio", "num_cpus", - "parking_lot 0.12.1", "pin-project-lite", "signal-hook-registry", "socket2", @@ -5506,16 +4054,6 @@ dependencies = [ "windows-sys 0.48.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.0" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.2.0" @@ -5524,7 +4062,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5544,7 +4082,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f57eb36ecbe0fc510036adff84824dd3c24bb781e21bfa67b69d556aa85214f" dependencies = [ "pin-project", - "rand 0.8.5", + "rand", "tokio", ] @@ -5587,7 +4125,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f714dd15bead90401d77e04243611caec13726c2408afd5b31901dfcdcb3b181" dependencies = [ "futures-util", - "log 0.4.21", + "log", "rustls 0.20.9", "tokio", "tokio-rustls 0.23.4", @@ -5619,38 +4157,11 @@ dependencies = [ "serde", ] -[[package]] -name = "toml" -version = "0.7.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dd79e69d3b627db300ff956027cc6c3798cef26d22526befdfcd12feeb6d2257" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.19.15", -] - -[[package]] -name = "toml" -version = "0.8.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e9dd1545e8208b4a5af1aa9bbd0b4cf7e9ea08fabc5d0a5c67fcaafa17433aa3" -dependencies = [ - "serde", - "serde_spanned", - "toml_datetime", - "toml_edit 0.22.9", -] - [[package]] name = "toml_datetime" -version = "0.6.5" +version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3550f4e9685620ac18a50ed434eb3aec30db8ba93b0287467bca5826ea25baf1" -dependencies = [ - "serde", -] +checksum = "7cda73e2f1397b1262d6dfdcef8aafae14d1de7748d66822d3bfeeb6d03e5e4b" [[package]] name = "toml_edit" @@ -5658,89 +4169,20 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.6", - "serde", - "serde_spanned", - "toml_datetime", - "winnow 0.5.40", -] - -[[package]] -name = "toml_edit" -version = "0.20.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "70f427fce4d84c72b5b732388bf4a9f4531b53f74e2887e3ecb2481f68f66d81" -dependencies = [ - "indexmap 2.2.6", + "indexmap", "toml_datetime", - "winnow 0.5.40", + "winnow", ] [[package]] name = "toml_edit" -version = "0.22.9" +version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e40bb779c5187258fd7aad0eb68cb8706a0a81fa712fbea808ab43c4b8374c4" +checksum = "396e4d48bbb2b7554c944bde63101b5ae446cff6ec4a24227428f15eb72ef338" dependencies = [ - "indexmap 2.2.6", - "serde", - "serde_spanned", + "indexmap", "toml_datetime", - "winnow 0.6.5", -] - -[[package]] -name = "tonic" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3082666a3a6433f7f511c7192923fa1fe07c69332d3c6a2e6bb040b569199d5a" -dependencies = [ - "async-trait", - "axum 0.6.20", - "base64 0.21.7", - "bytes", - "futures-core", - "futures-util", - "h2 0.3.25", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.28", - "hyper-timeout", - "percent-encoding", - "pin-project", - "prost", - "tokio", - "tokio-stream", - "tower", - "tower-layer", - "tower-service", - "tracing", -] - -[[package]] -name = "tonic-build" -version = "0.9.2" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6fdaae4c2c638bb70fe42803a26fbd6fc6ac8c72f5c59f67ecc2a2dcabf4b07" -dependencies = [ - "prettyplease", - "proc-macro2", - "prost-build", - "quote", - "syn 1.0.109", -] - -[[package]] -name = "tonic-health" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "080964d45894b90273d2b1dd755fdd114560db8636bb41cea615213c45043c4d" -dependencies = [ - "async-stream", - "prost", - "tokio", - "tokio-stream", - "tonic", + "winnow", ] [[package]] @@ -5751,13 +4193,9 @@ checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", - "indexmap 1.9.3", "pin-project", "pin-project-lite", - "rand 0.8.5", - "slab", "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", @@ -5781,25 +4219,12 @@ version = "0.1.40" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" dependencies = [ - "log 0.4.21", + "log", "pin-project-lite", "tracing-attributes", "tracing-core", ] -[[package]] -name = "tracing-actix-web" -version = "0.7.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa069bd1503dd526ee793bb3fce408895136c95fc86d2edb2acf1c646d7f0684" -dependencies = [ - "actix-web", - "mutually_exclusive_features", - "pin-project", - "tracing", - "uuid 1.8.0", -] - [[package]] name = "tracing-attributes" version = "0.1.27" @@ -5808,7 +4233,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", ] [[package]] @@ -5837,7 +4262,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ee855f1f400bd0e5c02d150ae5de3840039a3f54b025156404e34c23c03f47c3" dependencies = [ - "log 0.4.21", + "log", "once_cell", "tracing-core", ] @@ -5900,8 +4325,8 @@ dependencies = [ "bytes", "http 0.2.12", "httparse", - "log 0.4.21", - "rand 0.8.5", + "log", + "rand", "rustls 0.20.9", "sha-1", "thiserror", @@ -5916,23 +4341,6 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "42ff0bf0c66b8238c6f3b578df37d0b7848e55df8577b3f74f92a69acceeb825" -[[package]] -name = "types" -version = "1.4.0" -dependencies = [ - "anyhow", - "async-trait", - "clap", - "contracts", - "eth-state-fold", - "eth-state-fold-types", - "im", - "rollups-events", - "serde", - "serde_json", - "snafu 0.8.2", -] - [[package]] name = "uint" version = "0.9.5" @@ -5978,15 +4386,6 @@ version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" -[[package]] -name = "unreachable" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56" -dependencies = [ - "void", -] - [[package]] name = "untrusted" version = "0.7.1" @@ -6010,16 +4409,6 @@ dependencies = [ "percent-encoding", ] -[[package]] -name = "users" -version = "0.11.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24cc0f6d6f267b73e5a2cadf007ba8f9bc39c6a6f9666f8cf25ea809a153b032" -dependencies = [ - "libc", - "log 0.4.21", -] - [[package]] name = "utf-8" version = "0.7.6" @@ -6038,19 +4427,10 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.12", + "getrandom", "serde", ] -[[package]] -name = "uuid" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" -dependencies = [ - "getrandom 0.2.12", -] - [[package]] name = "valuable" version = "0.1.0" @@ -6069,12 +4449,6 @@ version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" -[[package]] -name = "void" -version = "1.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a02e4885ed3bc0f2de90ea6dd45ebcbb66dacffe03547fadbb0eeae2770887d" - [[package]] name = "walkdir" version = "2.5.0" @@ -6094,12 +4468,6 @@ dependencies = [ "try-lock", ] -[[package]] -name = "wasi" -version = "0.9.0+wasi-snapshot-preview1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -6123,11 +4491,11 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", - "log 0.4.21", + "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "wasm-bindgen-shared", ] @@ -6161,7 +4529,7 @@ checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.55", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] @@ -6222,18 +4590,6 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix", -] - [[package]] name = "winapi" version = "0.3.9" @@ -6415,15 +4771,6 @@ dependencies = [ "memchr", ] -[[package]] -name = "winnow" -version = "0.6.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dffa400e67ed5a4dd237983829e66475f0a4a26938c4b04c21baede6262215b8" -dependencies = [ - "memchr", -] - [[package]] name = "winreg" version = "0.50.0" @@ -6443,7 +4790,7 @@ dependencies = [ "async_io_stream", "futures", "js-sys", - "log 0.4.21", + "log", "pharos", "rustc_version", "send_wrapper", @@ -6464,9 +4811,9 @@ dependencies = [ [[package]] name = "xml-rs" -version = "0.8.19" +version = "0.8.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fcb9cbac069e033553e8bb871be2fbdffcab578eb25bd0f7c508cedc6dcd75a" +checksum = "791978798f0597cfc70478424c2b4fdc2b7a8024aaff78497ef00f24ef674193" [[package]] name = "yansi" @@ -6474,26 +4821,6 @@ version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" -[[package]] -name = "zerocopy" -version = "0.7.32" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74d4d3961e53fa4c9a25a8637fc2bfaf2595b3d3ae34875568a5cf64787716be" -dependencies = [ - "zerocopy-derive", -] - -[[package]] -name = "zerocopy-derive" -version = "0.7.32" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.55", -] - [[package]] name = "zeroize" version = "1.7.0" @@ -6517,7 +4844,7 @@ dependencies = [ "pbkdf2", "sha1", "time", - "zstd 0.11.2+zstd.1.5.2", + "zstd", ] [[package]] @@ -6526,16 +4853,7 @@ version = "0.11.2+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "20cc960326ece64f010d2d2107537f26dc589a6573a316bd5b1dba685fa5fde4" dependencies = [ - "zstd-safe 5.0.2+zstd.1.5.2", -] - -[[package]] -name = "zstd" -version = "0.13.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d789b1514203a1120ad2429eae43a7bd32b90976a7bb8a05f7ec02fa88cc23a" -dependencies = [ - "zstd-safe 7.1.0", + "zstd-safe", ] [[package]] @@ -6548,15 +4866,6 @@ dependencies = [ "zstd-sys", ] -[[package]] -name = "zstd-safe" -version = "7.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cd99b45c6bc03a018c8b8a86025678c87e55526064e38f9df301989dce7ec0a" -dependencies = [ - "zstd-sys", -] - [[package]] name = "zstd-sys" version = "2.0.10+zstd.1.5.6" diff --git a/cmd/authority-claimer/Cargo.toml b/cmd/authority-claimer/Cargo.toml new file mode 100644 index 000000000..cdabc5196 --- /dev/null +++ b/cmd/authority-claimer/Cargo.toml @@ -0,0 +1,54 @@ +[package] +name = "authority-claimer" +version = "1.4.0" +license = "Apache-2.0" +edition = "2021" +resolver = "2" + +[[bin]] +name = "cartesi-rollups-authority-claimer" +path = "src/main.rs" +test = false + +[dependencies] +async-trait = "0.1" +axum = "0.7" +backoff = {version = "0.4", features = ["tokio"]} +base64 = "0.22" +clap = {version = "4.5", features = ["string", "derive", "env"]} +ethabi = "18.0" +ethers = "1.0" +ethers-signers = {version = "1.0", features = ["aws"]} +eth-state-fold = "0.9" +eth-state-fold-types = {version = "0.9", features = ["ethers"]} +eth-tx-manager = "0.10" +hex = "0.4" +prometheus-client = "0.22" +redis = {version = "0.25", features = ["streams", "tokio-comp", "connection-manager", "tls-native-tls", "tokio-native-tls-comp", "cluster", "cluster-async"]} +reqwest = "=0.11.24" # Set specific reqwest version to fix the build +rusoto_core = "0.48" +rusoto_kms = "0.48" +rusoto_sts = "0.48" +serde = "1.0" +serde_json = "1.0" +snafu = "0.8" +tokio = {version = "1.37", features = ["macros", "rt-multi-thread", "rt"]} +tracing = "0.1" +tracing-subscriber = { version = "0.3", features = ["env-filter"] } +url = "2.5" + +[build-dependencies] +eth-state-fold-types = {version = "0.9", features = ["ethers"]} +snafu = "0.8" +tempfile = "3.10" + +[dev-dependencies] +serial_test = "3.0" +testcontainers = "0.14" +tracing-test = {version = "0.2", features = ["no-env-filter"]} + +[profile.release] +strip = true # Automatically strip symbols from the binary. 
+
+[package.metadata.cargo-machete]
+ignored = ["reqwest"]
diff --git a/offchain/authority-claimer/README.md b/cmd/authority-claimer/README.md
similarity index 100%
rename from offchain/authority-claimer/README.md
rename to cmd/authority-claimer/README.md
diff --git a/offchain/contracts/build.rs b/cmd/authority-claimer/build.rs
similarity index 96%
rename from offchain/contracts/build.rs
rename to cmd/authority-claimer/build.rs
index 91140acc9..d1a1118bb 100644
--- a/offchain/contracts/build.rs
+++ b/cmd/authority-claimer/build.rs
@@ -20,10 +20,8 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     unzip_contracts(&tarball, tempdir.path())?;
 
     let contracts = vec![
-        ("inputs", "InputBox", "input_box.rs"),
         ("consensus/authority", "Authority", "authority.rs"),
         ("history", "History", "history.rs"),
-        ("dapp", "CartesiDApp", "cartesi_dapp.rs"),
     ];
     for (contract_path, contract_name, bindings_file_name) in contracts {
         let source_path = path(tempdir.path(), contract_path, contract_name);
diff --git a/offchain/authority-claimer/src/checker.rs b/cmd/authority-claimer/src/checker.rs
similarity index 96%
rename from offchain/authority-claimer/src/checker.rs
rename to cmd/authority-claimer/src/checker.rs
index 3ca293574..a005e766e 100644
--- a/offchain/authority-claimer/src/checker.rs
+++ b/cmd/authority-claimer/src/checker.rs
@@ -1,8 +1,9 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
+use crate::contracts::history::{Claim, History};
+use crate::rollups_events::{Address, RollupsClaim};
 use async_trait::async_trait;
-use contracts::history::{Claim, History};
 use ethers::{
     self,
     contract::ContractError,
     providers::{
@@ -11,7 +12,6 @@ use ethers::{
     },
     types::H160,
 };
-use rollups_events::{Address, RollupsClaim};
 use snafu::{ensure, ResultExt, Snafu};
 use std::sync::Arc;
 use std::{collections::HashMap, fmt::Debug};
@@ -123,8 +123,7 @@ impl DuplicateChecker for DefaultDuplicateChecker {
         let expected_first_index = self
             .claims // HashMap => DappAddress to Vec<Claim>
             .get(&rollups_claim.dapp_address) // Gets an Option<Vec<Claim>>
-            .map(|claims| claims.last()) // Maps to Option<Option<&Claim>>
-            .flatten() // Back to only one Option<&Claim>
+            .and_then(|claims| claims.last()) // Back to only one Option<&Claim>
             .map(|claim| claim.last_index + 1) // Maps to a number
             .unwrap_or(0); // If None, unwrap to 0
         if rollups_claim.first_index == expected_first_index {
diff --git a/offchain/authority-claimer/src/claimer.rs b/cmd/authority-claimer/src/claimer.rs
similarity index 95%
rename from offchain/authority-claimer/src/claimer.rs
rename to cmd/authority-claimer/src/claimer.rs
index 43c475d20..8e3bbc033 100644
--- a/offchain/authority-claimer/src/claimer.rs
+++ b/cmd/authority-claimer/src/claimer.rs
@@ -32,13 +32,13 @@ pub enum ClaimerError<
     T: TransactionSender,
 > {
     #[snafu(display("broker listener error"))]
-    BrokerListenerError { source: B::Error },
+    BrokerListener { source: B::Error },
 
     #[snafu(display("duplicated claim error"))]
-    DuplicatedClaimError { source: D::Error },
+    DuplicatedClaim { source: D::Error },
 
     #[snafu(display("transaction sender error"))]
-    TransactionSenderError { source: T::Error },
+    TransactionSender { source: T::Error },
 }
 
 // ------------------------------------------------------------------------------------------------
diff --git a/offchain/authority-claimer/src/config/cli.rs b/cmd/authority-claimer/src/config/cli.rs
similarity index 97%
rename from offchain/authority-claimer/src/config/cli.rs
rename to cmd/authority-claimer/src/config/cli.rs
index 5df9c6ffd..d22e754de 100644
--- 
a/offchain/authority-claimer/src/config/cli.rs
+++ b/cmd/authority-claimer/src/config/cli.rs
@@ -1,18 +1,7 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use clap::{command, Parser};
-use eth_tx_manager::{
-    config::{TxEnvCLIConfig as TxManagerCLIConfig, TxManagerConfig},
-    Priority,
-};
-use log::{LogConfig, LogEnvCliConfig};
-use redacted::Redacted;
-use rollups_events::{BrokerCLIConfig, BrokerConfig};
-use rusoto_core::Region;
-use snafu::ResultExt;
-use std::{fs, str::FromStr};
-
+use super::contracts::ContractsCLIConfig;
 use crate::config::{
     error::{
         AuthorityClaimerConfigError, ContractsSnafu, InvalidRegionSnafu,
@@ -21,8 +10,17 @@ use crate::config::{
     },
     AuthorityClaimerConfig, ContractsConfig, TxSigningConfig,
 };
-
-use super::contracts::ContractsCLIConfig;
+use crate::log::{LogConfig, LogEnvCliConfig};
+use crate::redacted::Redacted;
+use crate::rollups_events::{BrokerCLIConfig, BrokerConfig};
+use clap::{command, Parser};
+use eth_tx_manager::{
+    config::{TxEnvCLIConfig as TxManagerCLIConfig, TxManagerConfig},
+    Priority,
+};
+use rusoto_core::Region;
+use snafu::ResultExt;
+use std::{fs, str::FromStr};
 
 // ------------------------------------------------------------------------------------------------
 // AuthorityClaimerCLI
diff --git a/offchain/authority-claimer/src/config/contracts.rs b/cmd/authority-claimer/src/config/contracts.rs
similarity index 88%
rename from offchain/authority-claimer/src/config/contracts.rs
rename to cmd/authority-claimer/src/config/contracts.rs
index e9221f21c..16506f9b9 100644
--- a/offchain/authority-claimer/src/config/contracts.rs
+++ b/cmd/authority-claimer/src/config/contracts.rs
@@ -1,12 +1,12 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
+use crate::rollups_events::Address;
+use crate::types::blockchain_config::RollupsDeployment;
 use clap::Parser;
-use rollups_events::Address;
 use serde::de::DeserializeOwned;
 use snafu::{ResultExt, Snafu};
 use std::{fs::File, io::BufReader, path::PathBuf};
-use types::blockchain_config::RollupsDeployment;
 
 #[derive(Clone, Debug)]
 pub struct ContractsConfig {
@@ -50,16 +50,10 @@ impl TryFrom<ContractsCLIConfig> for ContractsConfig {
                 .map(read::<RollupsDeployment>)
                 .transpose()?
{ - history_address = history_address.or(file - .contracts - .history - .map(|c| c.address) - .flatten()); - authority_address = authority_address.or(file - .contracts - .authority - .map(|c| c.address) - .flatten()); + history_address = history_address + .or(file.contracts.history.and_then(|c| c.address)); + authority_address = authority_address + .or(file.contracts.authority.and_then(|c| c.address)); } Ok(ContractsConfig { diff --git a/offchain/authority-claimer/src/config/error.rs b/cmd/authority-claimer/src/config/error.rs similarity index 86% rename from offchain/authority-claimer/src/config/error.rs rename to cmd/authority-claimer/src/config/error.rs index e40411670..fa961980e 100644 --- a/offchain/authority-claimer/src/config/error.rs +++ b/cmd/authority-claimer/src/config/error.rs @@ -11,13 +11,13 @@ use super::ContractsConfigError; #[snafu(visibility(pub(crate)))] pub enum AuthorityClaimerConfigError { #[snafu(display("TxManager configuration error"))] - TxManagerError { source: TxManagerConfigError }, + TxManager { source: TxManagerConfigError }, #[snafu(display("TxSigning configuration error"))] - TxSigningError { source: TxSigningConfigError }, + TxSigning { source: TxSigningConfigError }, #[snafu(display("Contracts configuration error"))] - ContractsError { source: ContractsConfigError }, + Contracts { source: ContractsConfigError }, } #[derive(Debug, Snafu)] diff --git a/offchain/authority-claimer/src/config/mod.rs b/cmd/authority-claimer/src/config/mod.rs similarity index 88% rename from offchain/authority-claimer/src/config/mod.rs rename to cmd/authority-claimer/src/config/mod.rs index 21d1932b2..9fffb4fac 100644 --- a/offchain/authority-claimer/src/config/mod.rs +++ b/cmd/authority-claimer/src/config/mod.rs @@ -6,14 +6,14 @@ mod contracts; mod error; pub use contracts::{ContractsConfig, ContractsConfigError}; -pub use error::{AuthorityClaimerConfigError, TxSigningConfigError}; +pub use error::AuthorityClaimerConfigError; +use crate::http_server::HttpServerConfig; +use crate::log::LogConfig; +use crate::redacted::Redacted; +use crate::rollups_events::BrokerConfig; use cli::AuthorityClaimerCLI; use eth_tx_manager::{config::TxManagerConfig, Priority}; -use http_server::HttpServerConfig; -use log::LogConfig; -use redacted::Redacted; -use rollups_events::BrokerConfig; use rusoto_core::Region; #[derive(Debug, Clone)] diff --git a/offchain/contracts/src/lib.rs b/cmd/authority-claimer/src/contracts.rs similarity index 91% rename from offchain/contracts/src/lib.rs rename to cmd/authority-claimer/src/contracts.rs index b81fbdd3a..bdc43df33 100644 --- a/offchain/contracts/src/lib.rs +++ b/cmd/authority-claimer/src/contracts.rs @@ -15,7 +15,5 @@ macro_rules! contract { }; } -contract!(input_box); contract!(authority); contract!(history); -contract!(cartesi_dapp); diff --git a/offchain/http-server/src/lib.rs b/cmd/authority-claimer/src/http_server.rs similarity index 52% rename from offchain/http-server/src/lib.rs rename to cmd/authority-claimer/src/http_server.rs index 0fbbf914e..1c8fb97d6 100644 --- a/offchain/http-server/src/lib.rs +++ b/cmd/authority-claimer/src/http_server.rs @@ -1,9 +1,6 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -mod config; -pub use config::HttpServerConfig; - // Re-exporting prometheus' Registry. 
 pub use prometheus_client::registry::Registry;
@@ -13,8 +10,48 @@ pub use prometheus_client::metrics::counter::Counter as CounterRef;
 pub use prometheus_client::metrics::family::Family as FamilyRef;
 // End of metrics to re-export.
 
-// Re-exporting hyper error.
-pub use hyper::Error as HttpServerError;
+use clap::{
+    value_parser, Arg, Command, CommandFactory, FromArgMatches, Parser,
+};
+
+#[derive(Debug, Clone, Parser)]
+pub struct HttpServerConfig {
+    pub(crate) port: u16,
+}
+
+impl HttpServerConfig {
+    /// Returns the HTTP server config and the app's config after parsing
+    /// them from the command line and/or environment variables.
+    ///
+    /// The parameter `service` must be a lowercase string that
+    /// uses underscores as spaces.
+    ///
+    /// The parametric type `C` must be a struct that derives `Parser`.
+    pub fn parse<C: Parser>(
+        service: &'static str,
+    ) -> (HttpServerConfig, C) {
+        let command = <C as CommandFactory>::command();
+        let command = add_port_arg(command, service);
+
+        let matches = command.get_matches();
+        let http_server_config: HttpServerConfig =
+            FromArgMatches::from_arg_matches(&matches).unwrap();
+        let inner_config: C =
+            FromArgMatches::from_arg_matches(&matches).unwrap();
+        (http_server_config, inner_config)
+    }
+}
+
+fn add_port_arg<S: ToString>(command: Command, service: S) -> Command {
+    let service = service.to_string().to_uppercase();
+    command.arg(
+        Arg::new("port")
+            .long("http-server-port")
+            .env(format!("{}_HTTP_SERVER_PORT", service))
+            .value_parser(value_parser!(u16))
+            .default_value("8080"),
+    )
+}
 
 use axum::{routing::get, Router};
 use prometheus_client::encoding::text::encode;
diff --git a/offchain/authority-claimer/src/lib.rs b/cmd/authority-claimer/src/lib.rs
similarity index 89%
rename from offchain/authority-claimer/src/lib.rs
rename to cmd/authority-claimer/src/lib.rs
index 944efb4b4..7120f5629 100644
--- a/offchain/authority-claimer/src/lib.rs
+++ b/cmd/authority-claimer/src/lib.rs
@@ -1,17 +1,22 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-pub mod checker;
-pub mod claimer;
-pub mod config;
-pub mod listener;
-pub mod metrics;
-pub mod sender;
-pub mod signer;
+mod checker;
+mod claimer;
+mod config;
+mod contracts;
+mod http_server;
+mod listener;
+pub mod log;
+mod metrics;
+mod redacted;
+mod rollups_events;
+mod sender;
+mod signer;
+mod types;
 
-use config::Config;
-use snafu::Error;
-use tracing::trace;
+#[cfg(test)]
+mod test_fixtures;
 
 use crate::{
     checker::DefaultDuplicateChecker,
@@ -20,6 +25,9 @@
     metrics::AuthorityClaimerMetrics,
     sender::DefaultTransactionSender,
 };
+pub use config::Config;
+use snafu::Error;
+use tracing::trace;
 
 pub async fn run(config: Config) -> Result<(), Box<dyn Error>> {
     // Creating the metrics and health server.
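
For illustration (not part of this patch): a minimal sketch of how a binary would use the new `HttpServerConfig::parse` helper. The `InnerConfig` struct and the "my_service" name are hypothetical, and since `port` is `pub(crate)`, the sketch assumes it lives in the same crate as http_server.rs.

    use clap::Parser;

    use crate::http_server::HttpServerConfig;

    // Hypothetical inner config; any struct deriving clap::Parser works here.
    #[derive(Debug, Clone, Parser)]
    struct InnerConfig {
        #[arg(long, env = "MY_SERVICE_GREETING", default_value = "hello")]
        greeting: String,
    }

    fn main() {
        // "my_service" is lowercase with underscores, so the port is read from
        // --http-server-port or MY_SERVICE_HTTP_SERVER_PORT (default 8080).
        let (http_config, inner) =
            HttpServerConfig::parse::<InnerConfig>("my_service");
        println!("port={} greeting={}", http_config.port, inner.greeting);
    }
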
diff --git a/offchain/authority-claimer/src/listener.rs b/cmd/authority-claimer/src/listener.rs
similarity index 96%
rename from offchain/authority-claimer/src/listener.rs
rename to cmd/authority-claimer/src/listener.rs
index 3f50a1cb1..358979250 100644
--- a/offchain/authority-claimer/src/listener.rs
+++ b/cmd/authority-claimer/src/listener.rs
@@ -1,11 +1,11 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use async_trait::async_trait;
-use rollups_events::{
+use crate::rollups_events::{
     Broker, BrokerConfig, BrokerError, RollupsClaim, RollupsClaimsStream,
     INITIAL_ID,
 };
+use async_trait::async_trait;
 use snafu::ResultExt;
 use std::fmt::Debug;
 
@@ -72,18 +72,15 @@ impl BrokerListener for DefaultBrokerListener {
 
 #[cfg(test)]
 mod tests {
-    use std::time::Duration;
-    use testcontainers::clients::Cli;
-
-    use test_fixtures::BrokerFixture;
-
     use crate::listener::{BrokerListener, DefaultBrokerListener};
-
-    use backoff::ExponentialBackoffBuilder;
-    use rollups_events::{
-        BrokerConfig, BrokerEndpoint, BrokerError, RedactedUrl, RollupsClaim,
-        Url,
+    use crate::redacted::{RedactedUrl, Url};
+    use crate::rollups_events::{
+        broker::BrokerEndpoint, BrokerConfig, BrokerError, RollupsClaim,
     };
+    use crate::test_fixtures::BrokerFixture;
+    use backoff::ExponentialBackoffBuilder;
+    use std::time::Duration;
+    use testcontainers::clients::Cli;
 
     // ------------------------------------------------------------------------------------------------
     // Broker Mock
diff --git a/offchain/log/src/lib.rs b/cmd/authority-claimer/src/log.rs
similarity index 75%
rename from offchain/log/src/lib.rs
rename to cmd/authority-claimer/src/log.rs
index 426afef03..5f2d6be72 100644
--- a/offchain/log/src/lib.rs
+++ b/cmd/authority-claimer/src/log.rs
@@ -1,15 +1,11 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use std::fmt::Debug;
 use clap::Parser;
+use std::fmt::Debug;
 use tracing::info;
 use tracing_subscriber::filter::{EnvFilter, LevelFilter};
 
-pub mod built_info {
-    include!(concat!(env!("OUT_DIR"), "/built.rs"));
-}
-
 #[derive(Debug, Parser)]
 #[command(name = "log_config")]
 pub struct LogEnvCliConfig {
@@ -62,10 +58,7 @@ pub fn configure(config: &LogConfig) {
     }
 }
 
-pub fn log_service_start<C: Debug>(config: &C, service_name: &str) {
-    let git_ref = built_info::GIT_HEAD_REF.unwrap_or("N/A");
-    let git_hash = built_info::GIT_COMMIT_HASH.unwrap_or("N/A");
-
-    let message = format!("Starting {service} (version={version}, git ref={git_ref}, git hash={git_hash}) with config {:?}", config, service = service_name, version = built_info::PKG_VERSION, git_ref = git_ref, git_hash = git_hash);
+pub fn log_service_start<C: Debug>(config: &C, service: &str) {
+    let message = format!("Starting {} with config {:?}", service, config);
     info!(message);
 }
diff --git a/offchain/authority-claimer/src/main.rs b/cmd/authority-claimer/src/main.rs
similarity index 93%
rename from offchain/authority-claimer/src/main.rs
rename to cmd/authority-claimer/src/main.rs
index 0185876db..145e8551e 100644
--- a/offchain/authority-claimer/src/main.rs
+++ b/cmd/authority-claimer/src/main.rs
@@ -1,7 +1,7 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use authority_claimer::config::Config;
+use authority_claimer::{log, Config};
 use std::error::Error;
 
 #[tokio::main]
diff --git a/offchain/authority-claimer/src/metrics.rs b/cmd/authority-claimer/src/metrics.rs
similarity index 89%
rename from offchain/authority-claimer/src/metrics.rs
rename to cmd/authority-claimer/src/metrics.rs
index 3ffd67a66..7108c88e5 100644
--- a/offchain/authority-claimer/src/metrics.rs
+++ b/cmd/authority-claimer/src/metrics.rs
@@ -1,8 +1,8 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use http_server::{CounterRef, FamilyRef, Registry};
-use rollups_events::DAppMetadata;
+use crate::http_server::{CounterRef, FamilyRef, Registry};
+use crate::rollups_events::DAppMetadata;
 
 const METRICS_PREFIX: &str = "cartesi_rollups_authority_claimer";
 
diff --git a/offchain/redacted/src/lib.rs b/cmd/authority-claimer/src/redacted.rs
similarity index 100%
rename from offchain/redacted/src/lib.rs
rename to cmd/authority-claimer/src/redacted.rs
diff --git a/offchain/rollups-events/src/broker/mod.rs b/cmd/authority-claimer/src/rollups_events/broker.rs
similarity index 99%
rename from offchain/rollups-events/src/broker/mod.rs
rename to cmd/authority-claimer/src/rollups_events/broker.rs
index fa622a3d1..9df69c5ed 100644
--- a/offchain/rollups-events/src/broker/mod.rs
+++ b/cmd/authority-claimer/src/rollups_events/broker.rs
@@ -17,9 +17,7 @@ use snafu::{ResultExt, Snafu};
 use std::fmt;
 use std::time::Duration;
 
-pub use redacted::{RedactedUrl, Url};
-
-pub mod indexer;
+use crate::redacted::{RedactedUrl, Url};
 
 pub const INITIAL_ID: &str = "0";
 
diff --git a/offchain/rollups-events/src/common.rs b/cmd/authority-claimer/src/rollups_events/common.rs
similarity index 97%
rename from offchain/rollups-events/src/common.rs
rename to cmd/authority-claimer/src/rollups_events/common.rs
index 1665896f7..5fbb73fa0 100644
--- a/offchain/rollups-events/src/common.rs
+++ b/cmd/authority-claimer/src/rollups_events/common.rs
@@ -109,14 +109,6 @@ impl Payload {
     pub fn inner(&self) -> &Vec<u8> {
         &self.0
     }
-
-    pub fn mut_inner(&mut self) -> &mut Vec<u8> {
-        &mut self.0
-    }
-
-    pub fn into_inner(self) -> Vec<u8> {
-        self.0
-    }
 }
 
 impl From<Vec<u8>> for Payload {
diff --git a/cmd/authority-claimer/src/rollups_events/mod.rs b/cmd/authority-claimer/src/rollups_events/mod.rs
new file mode 100644
index 000000000..17a8f04af
--- /dev/null
+++ b/cmd/authority-claimer/src/rollups_events/mod.rs
@@ -0,0 +1,15 @@
+// (c) Cartesi and individual authors (see AUTHORS)
+// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
+
+pub mod broker;
+pub mod common;
+pub mod rollups_claims;
+pub mod rollups_stream;
+
+pub use broker::{
+    Broker, BrokerCLIConfig, BrokerConfig, BrokerError, BrokerStream,
+    INITIAL_ID,
+};
+pub use common::{Address, Hash};
+pub use rollups_claims::{RollupsClaim, RollupsClaimsStream};
+pub use rollups_stream::DAppMetadata;
diff --git a/offchain/rollups-events/src/rollups_claims.rs b/cmd/authority-claimer/src/rollups_events/rollups_claims.rs
similarity index 95%
rename from offchain/rollups-events/src/rollups_claims.rs
rename to cmd/authority-claimer/src/rollups_events/rollups_claims.rs
index 69cb2a486..7c5abc6ae 100644
--- a/offchain/rollups-events/src/rollups_claims.rs
+++ b/cmd/authority-claimer/src/rollups_events/rollups_claims.rs
@@ -3,7 +3,7 @@
 
 use serde::{Deserialize, Serialize};
 
-use crate::{Address, BrokerStream, Hash};
+use super::{Address, BrokerStream, Hash};
 
 #[derive(Debug)]
 pub struct RollupsClaimsStream {
diff --git a/offchain/rollups-events/src/rollups_stream.rs b/cmd/authority-claimer/src/rollups_events/rollups_stream.rs
similarity index 55%
rename from offchain/rollups-events/src/rollups_stream.rs
rename to cmd/authority-claimer/src/rollups_events/rollups_stream.rs
index 7dd677fd3..732fef5f6 100644
--- a/offchain/rollups-events/src/rollups_stream.rs
+++ b/cmd/authority-claimer/src/rollups_events/rollups_stream.rs
@@ -1,7 +1,7 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use crate::Address;
+use super::Address;
 use clap::Parser;
 use prometheus_client::encoding::EncodeLabelSet;
 use serde_json::Value;
@@ -62,61 +62,3 @@ impl From<DAppMetadataCLIConfig> for DAppMetadata {
         }
     }
 }
-
-/// Declares a struct that implements the BrokerStream interface
-/// The generated key has the format `{chain-<chain_id>:dapp-<dapp_address>}:<key>`.
-/// The curly braces define a hash tag to ensure that all of a dapp's streams
-/// are located in the same node when connected to a Redis cluster.
-macro_rules! decl_broker_stream {
-    ($stream: ident, $payload: ty, $key: literal) => {
-        #[derive(Debug)]
-        pub struct $stream {
-            key: String,
-        }
-
-        impl crate::broker::BrokerStream for $stream {
-            type Payload = $payload;
-
-            fn key(&self) -> &str {
-                &self.key
-            }
-        }
-
-        impl $stream {
-            pub fn new(metadata: &crate::rollups_stream::DAppMetadata) -> Self {
-                Self {
-                    key: format!(
-                        "{{chain-{}:dapp-{}}}:{}",
-                        metadata.chain_id,
-                        hex::encode(metadata.dapp_address.inner()),
-                        $key
-                    ),
-                }
-            }
-        }
-    };
-}
-
-pub(crate) use decl_broker_stream;
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use crate::ADDRESS_SIZE;
-    use serde::{Deserialize, Serialize};
-
-    #[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)]
-    pub struct MockPayload;
-
-    decl_broker_stream!(MockStream, MockPayload, "rollups-mock");
-
-    #[test]
-    fn it_generates_the_key() {
-        let metadata = DAppMetadata {
-            chain_id: 123,
-            dapp_address: Address::new([0xfa; ADDRESS_SIZE]),
-        };
-        let stream = MockStream::new(&metadata);
-        assert_eq!(stream.key, "{chain-123:dapp-fafafafafafafafafafafafafafafafafafafafa}:rollups-mock");
-    }
-}
diff --git a/offchain/authority-claimer/src/sender.rs b/cmd/authority-claimer/src/sender.rs
similarity index 98%
rename from offchain/authority-claimer/src/sender.rs
rename to cmd/authority-claimer/src/sender.rs
index 428e45574..634ecb0bf 100644
--- a/offchain/authority-claimer/src/sender.rs
+++ b/cmd/authority-claimer/src/sender.rs
@@ -1,8 +1,9 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
+use crate::contracts::{authority::Authority, history::Claim};
+use crate::rollups_events::{DAppMetadata, RollupsClaim};
 use async_trait::async_trait;
-use contracts::{authority::Authority, history::Claim};
 use eth_tx_manager::{
     database::FileSystemDatabase as Database,
     gas_oracle::DefaultGasOracle as GasOracle,
@@ -22,7 +23,6 @@ use ethers::{
     signers::Signer,
     types::{Bytes, NameOrAddress, H160},
 };
-use rollups_events::{DAppMetadata, RollupsClaim};
 use snafu::{OptionExt, ResultExt, Snafu};
 use std::fmt::Debug;
 use std::sync::Arc;
diff --git a/offchain/authority-claimer/src/signer/aws_credentials.rs b/cmd/authority-claimer/src/signer/aws_credentials.rs
similarity index 100%
rename from offchain/authority-claimer/src/signer/aws_credentials.rs
rename to cmd/authority-claimer/src/signer/aws_credentials.rs
diff --git a/offchain/authority-claimer/src/signer/aws_signer.rs b/cmd/authority-claimer/src/signer/aws_signer.rs
similarity index 100%
rename from offchain/authority-claimer/src/signer/aws_signer.rs
rename to cmd/authority-claimer/src/signer/aws_signer.rs
diff --git a/offchain/authority-claimer/src/signer/mod.rs b/cmd/authority-claimer/src/signer/mod.rs
similarity index 100%
rename from offchain/authority-claimer/src/signer/mod.rs
rename to cmd/authority-claimer/src/signer/mod.rs
diff --git a/offchain/authority-claimer/src/signer/signer.rs b/cmd/authority-claimer/src/signer/signer.rs
similarity index 99%
rename from offchain/authority-claimer/src/signer/signer.rs
rename to cmd/authority-claimer/src/signer/signer.rs
index a38333c93..14fd25ce1 100644
--- a/offchain/authority-claimer/src/signer/signer.rs
+++ b/cmd/authority-claimer/src/signer/signer.rs
@@ -157,14 +157,13 @@ impl Signer for ConditionalSigner {
 
 #[cfg(test)]
 mod tests {
+    use crate::redacted::Redacted;
+    use crate::{config::TxSigningConfig, signer::ConditionalSigner};
     use ethers::types::{
         transaction::{eip2718::TypedTransaction, eip2930::AccessList},
         Address, Eip1559TransactionRequest,
     };
     use ethers_signers::Signer;
-    use redacted::Redacted;
-
-    use crate::{config::TxSigningConfig, signer::ConditionalSigner};
 
     // --------------------------------------------------------------------------------------------
     // new
diff --git a/offchain/test-fixtures/src/broker.rs b/cmd/authority-claimer/src/test_fixtures.rs
similarity index 55%
rename from offchain/test-fixtures/src/broker.rs
rename to cmd/authority-claimer/src/test_fixtures.rs
index 95b9ce19b..38662cc54 100644
--- a/offchain/test-fixtures/src/broker.rs
+++ b/cmd/authority-claimer/src/test_fixtures.rs
@@ -1,13 +1,12 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use backoff::ExponentialBackoff;
-use rollups_events::{
-    Address, Broker, BrokerConfig, BrokerEndpoint, DAppMetadata, Event,
-    RedactedUrl, RollupsClaim, RollupsClaimsStream, RollupsData, RollupsInput,
-    RollupsInputsStream, RollupsOutput, RollupsOutputsStream, Url,
-    ADDRESS_SIZE, INITIAL_ID,
+use crate::redacted::{RedactedUrl, Url};
+use crate::rollups_events::{
+    broker::BrokerEndpoint, common::ADDRESS_SIZE, Address, Broker,
+    BrokerConfig, DAppMetadata, RollupsClaim, RollupsClaimsStream, INITIAL_ID,
 };
+use backoff::ExponentialBackoff;
 use testcontainers::{
     clients::Cli, core::WaitFor, images::generic::GenericImage, Container,
 };
@@ -20,12 +19,9 @@ const CONSUME_TIMEOUT: usize = 10_000; // ms
 
 pub struct BrokerFixture<'d> {
     _node: Container<'d, GenericImage>,
     client: Mutex<Broker>,
-    inputs_stream: RollupsInputsStream,
     claims_stream: RollupsClaimsStream,
-    outputs_stream: RollupsOutputsStream,
     redis_endpoint: BrokerEndpoint,
     chain_id: u64,
-    dapp_address: Address,
 }
 
 impl BrokerFixture<'_> {
@@ -45,15 +41,12 @@ impl BrokerFixture<'_> {
                 .expect("failed to parse Redis Url"),
         );
         let chain_id = CHAIN_ID;
-        let dapp_address = DAPP_ADDRESS;
         let backoff = ExponentialBackoff::default();
         let metadata = DAppMetadata {
             chain_id,
-            dapp_address: dapp_address.clone(),
+            dapp_address: DAPP_ADDRESS.clone(),
         };
-        let inputs_stream = RollupsInputsStream::new(&metadata);
         let claims_stream = RollupsClaimsStream::new(metadata.chain_id);
-        let outputs_stream = RollupsOutputsStream::new(&metadata);
         let config = BrokerConfig {
             redis_endpoint: redis_endpoint.clone(),
             consume_timeout: CONSUME_TIMEOUT,
@@ -72,12 +65,9 @@ impl BrokerFixture<'_> {
         BrokerFixture {
             _node: node,
             client,
-            inputs_stream,
             claims_stream,
-            outputs_stream,
             redis_endpoint,
             chain_id,
-            dapp_address,
         }
     }
 
@@ -89,81 +79,6 @@ impl BrokerFixture<'_> {
         self.chain_id
     }
 
-    pub fn dapp_address(&self) -> &Address {
-        &self.dapp_address
-    }
-
-    pub fn dapp_metadata(&self) -> DAppMetadata {
-        DAppMetadata {
-            chain_id: self.chain_id,
-            dapp_address: self.dapp_address.clone(),
-        }
-    }
-
-    /// Obtain the latest event from the rollups inputs stream
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn get_latest_input_event(&self) -> Option<Event<RollupsInput>> {
-        tracing::trace!("getting latest input event");
-        self.client
-            .lock()
-            .await
-            .peek_latest(&self.inputs_stream)
-            .await
-            .expect("failed to get latest input event")
-    }
-
-    /// Produce the input event given the data
-    /// Return the produced event id
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn produce_input_event(&self, data: RollupsData) -> String {
-        tracing::trace!(?data, "producing rollups-input event");
-        let last_event = self.get_latest_input_event().await;
-        let epoch_index = match last_event.as_ref() {
-            Some(event) => match event.payload.data {
-                RollupsData::AdvanceStateInput { .. } => {
-                    event.payload.epoch_index
-                }
-                RollupsData::FinishEpoch {} => event.payload.epoch_index + 1,
-            },
-            None => 0,
-        };
-        let previous_inputs_sent_count = match last_event.as_ref() {
-            Some(event) => event.payload.inputs_sent_count,
-            None => 0,
-        };
-        let inputs_sent_count = match data {
-            RollupsData::AdvanceStateInput { .. } => {
-                previous_inputs_sent_count + 1
-            }
-            RollupsData::FinishEpoch {} => previous_inputs_sent_count,
-        };
-        let parent_id = match last_event {
-            Some(event) => event.id,
-            None => INITIAL_ID.to_owned(),
-        };
-        let input = RollupsInput {
-            parent_id,
-            epoch_index,
-            inputs_sent_count,
-            data,
-        };
-        self.produce_raw_input_event(input).await
-    }
-
-    /// Produce the input event given the input
-    /// This may produce inconsistent inputs
-    /// Return the produced event id
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn produce_raw_input_event(&self, input: RollupsInput) -> String {
-        tracing::trace!(?input, "producing rollups-input raw event");
-        self.client
-            .lock()
-            .await
-            .produce(&self.inputs_stream, input)
-            .await
-            .expect("failed to produce event")
-    }
-
     /// Produce the claim given the hash
     #[tracing::instrument(level = "trace", skip_all)]
     pub async fn produce_rollups_claim(&self, rollups_claim: RollupsClaim) {
@@ -233,16 +148,4 @@ impl BrokerFixture<'_> {
         }
         claims
     }
-
-    /// Produce an output event
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn produce_output(&self, output: RollupsOutput) {
-        tracing::trace!(?output, "producing rollups-outputs event");
-        self.client
-            .lock()
-            .await
-            .produce(&self.outputs_stream, output)
-            .await
-            .expect("failed to produce output");
-    }
 }
diff --git a/offchain/types/src/blockchain_config.rs b/cmd/authority-claimer/src/types/blockchain_config.rs
similarity index 92%
rename from offchain/types/src/blockchain_config.rs
rename to cmd/authority-claimer/src/types/blockchain_config.rs
index e6250a0a3..81c72fb23 100644
--- a/offchain/types/src/blockchain_config.rs
+++ b/cmd/authority-claimer/src/types/blockchain_config.rs
@@ -1,8 +1,8 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
+use crate::rollups_events::Address;
 use clap::{command, Parser};
-use rollups_events::Address;
 use serde::{de::DeserializeOwned, Deserialize};
 use snafu::{ResultExt, Snafu};
 use std::{fs::File, io::BufReader, path::PathBuf};
@@ -122,21 +122,12 @@ impl TryFrom<BlockchainCLIConfig> for BlockchainConfig {
             .map(read::<RollupsDeployment>)
             .transpose()?
         {
-            history_address = history_address.or(file
-                .contracts
-                .history
-                .map(|c| c.address)
-                .flatten());
-            authority_address = authority_address.or(file
-                .contracts
-                .authority
-                .map(|c| c.address)
-                .flatten());
-            input_box_address = input_box_address.or(file
-                .contracts
-                .input_box
-                .map(|c| c.address)
-                .flatten());
+            history_address = history_address
+                .or(file.contracts.history.and_then(|c| c.address));
+            authority_address = authority_address
+                .or(file.contracts.authority.and_then(|c| c.address));
+            input_box_address = input_box_address
+                .or(file.contracts.input_box.and_then(|c| c.address));
         }
 
         Ok(BlockchainConfig {
diff --git a/offchain/types/src/error.rs b/cmd/authority-claimer/src/types/error.rs
similarity index 86%
rename from offchain/types/src/error.rs
rename to cmd/authority-claimer/src/types/error.rs
index 4e29c11c9..e4d7cfa29 100644
--- a/offchain/types/src/error.rs
+++ b/cmd/authority-claimer/src/types/error.rs
@@ -1,13 +1,13 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-use anyhow::Error;
 use eth_state_fold::Foldable;
 use eth_state_fold_types::ethers::prelude::{ContractError, Middleware};
+use std::error::Error;
 use std::fmt::{Display, Formatter};
 
 #[derive(Debug)]
-pub struct FoldableError(Error);
+pub struct FoldableError(Box<dyn Error>);
 
 impl Display for FoldableError {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
@@ -17,8 +17,8 @@ impl Display for FoldableError {
 
 impl std::error::Error for FoldableError {}
 
-impl From<Error> for FoldableError {
-    fn from(error: Error) -> Self {
+impl From<Box<dyn Error>> for FoldableError {
+    fn from(error: Box<dyn Error>) -> Self {
         Self(error)
     }
 }
diff --git a/offchain/rollups-http-client/src/lib.rs b/cmd/authority-claimer/src/types/mod.rs
similarity index 65%
rename from offchain/rollups-http-client/src/lib.rs
rename to cmd/authority-claimer/src/types/mod.rs
index c3d11944f..a9502b0b3 100644
--- a/offchain/rollups-http-client/src/lib.rs
+++ b/cmd/authority-claimer/src/types/mod.rs
@@ -1,5 +1,6 @@
 // (c) Cartesi and individual authors (see AUTHORS)
 // SPDX-License-Identifier: Apache-2.0 (see LICENSE)
 
-pub mod client;
-pub mod rollup;
+pub mod blockchain_config;
+pub mod error;
+pub mod utils;
diff --git a/offchain/types/src/utils.rs b/cmd/authority-claimer/src/types/utils.rs
similarity index 83%
rename from offchain/types/src/utils.rs
rename to cmd/authority-claimer/src/types/utils.rs
index 44d11d711..c21275037 100644
--- a/offchain/types/src/utils.rs
+++ b/cmd/authority-claimer/src/types/utils.rs
@@ -13,19 +13,6 @@ where
     right: Peekable<R>,
 }
 
-impl<I, L, R> MergeAscending<L, R>
-where
-    L: Iterator<Item = I>,
-    R: Iterator<Item = I>,
-{
-    pub fn new(left: L, right: R) -> Self {
-        MergeAscending {
-            left: left.peekable(),
-            right: right.peekable(),
-        }
-    }
-}
-
 impl<I, L, R> Iterator for MergeAscending<L, R>
 where
     L: Iterator<Item = I>,
     R: Iterator<Item = I>,
diff --git a/offchain/rust-toolchain.toml b/cmd/rust-toolchain.toml
similarity index 100%
rename from offchain/rust-toolchain.toml
rename to cmd/rust-toolchain.toml
diff --git a/docs/config.md b/docs/config.md
index a48b45f4e..a37d5e231 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -177,16 +177,6 @@ the snapshot matches the hash in the Application contract.
 * **Type:** `bool`
 * **Default:** `"false"`
 
-## `CARTESI_FEATURE_HOST_MODE`
-
-If set to true, the node will run in host mode.
-
-In host mode, computations will not be performed by the cartesi machine.
-You should only use host mode for development and debugging!
-
-* **Type:** `bool`
-* **Default:** `"false"`
-
 ## `CARTESI_HTTP_ADDRESS`
 
 HTTP address for the node.
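
For illustration (not part of this patch): the convention documented above — look the variable up, fall back to its documented default, then parse it by its declared type — in a minimal Rust sketch. The node's real reader is the generated Go code in internal/node/config/generated.go; `str::parse::<bool>` here stands in for its `toBool` helper.

    use std::env;

    // Sketch of the lookup-default-parse convention for a CARTESI_* boolean.
    fn get_feature_disable_claimer() -> bool {
        let s = env::var("CARTESI_FEATURE_DISABLE_CLAIMER")
            .unwrap_or_else(|_| "false".to_string()); // documented default
        s.parse::<bool>().unwrap_or_else(|e| {
            panic!("failed to parse CARTESI_FEATURE_DISABLE_CLAIMER: {e}")
        })
    }

    fn main() {
        println!("claimer disabled: {}", get_feature_disable_claimer());
    }
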
diff --git a/internal/node/config/config.go b/internal/node/config/config.go index 6f9d6391d..13a5d8b21 100644 --- a/internal/node/config/config.go +++ b/internal/node/config/config.go @@ -31,7 +31,6 @@ type NodeConfig struct { PostgresEndpoint Redacted[string] HttpAddress string HttpPort int - FeatureHostMode bool FeatureDisableClaimer bool FeatureDisableMachineHashCheck bool ExperimentalServerManagerBypassLog bool @@ -86,13 +85,10 @@ func FromEnv() NodeConfig { config.ContractsAuthorityAddress = getContractsAuthorityAddress() config.ContractsInputBoxAddress = getContractsInputBoxAddress() config.ContractsInputBoxDeploymentBlockNumber = getContractsInputBoxDeploymentBlockNumber() - if !getFeatureHostMode() { - config.SnapshotDir = getSnapshotDir() - } + config.SnapshotDir = getSnapshotDir() config.PostgresEndpoint = Redacted[string]{getPostgresEndpoint()} config.HttpAddress = getHttpAddress() config.HttpPort = getHttpPort() - config.FeatureHostMode = getFeatureHostMode() config.FeatureDisableClaimer = getFeatureDisableClaimer() config.FeatureDisableMachineHashCheck = getFeatureDisableMachineHashCheck() config.ExperimentalServerManagerBypassLog = getExperimentalServerManagerBypassLog() diff --git a/internal/node/config/generate/Config.toml b/internal/node/config/generate/Config.toml index bda385b6c..a5fbc716a 100644 --- a/internal/node/config/generate/Config.toml +++ b/internal/node/config/generate/Config.toml @@ -19,15 +19,6 @@ If set to true, the node will add colors to its log output.""" # Features # -[features.CARTESI_FEATURE_HOST_MODE] -default = "false" -go-type = "bool" -description = """ -If set to true, the node will run in host mode. - -In host mode, computations will not be performed by the cartesi machine. -You should only use host mode for development and debugging!""" - [features.CARTESI_FEATURE_DISABLE_CLAIMER] default = "false" go-type = "bool" diff --git a/internal/node/config/generated.go b/internal/node/config/generated.go index bb60cf9d5..ab4955025 100644 --- a/internal/node/config/generated.go +++ b/internal/node/config/generated.go @@ -389,18 +389,6 @@ func getFeatureDisableMachineHashCheck() bool { return val } -func getFeatureHostMode() bool { - s, ok := os.LookupEnv("CARTESI_FEATURE_HOST_MODE") - if !ok { - s = "false" - } - val, err := toBool(s) - if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_FEATURE_HOST_MODE: %v", err)) - } - return val -} - func getHttpAddress() string { s, ok := os.LookupEnv("CARTESI_HTTP_ADDRESS") if !ok { diff --git a/internal/node/handlers.go b/internal/node/handlers.go index 250344194..cae5c545a 100644 --- a/internal/node/handlers.go +++ b/internal/node/handlers.go @@ -4,11 +4,8 @@ package node import ( - "fmt" "log/slog" "net/http" - "net/http/httputil" - "net/url" "github.com/cartesi/rollups-node/internal/node/config" ) @@ -16,21 +13,6 @@ import ( func newHttpServiceHandler(c config.NodeConfig) http.Handler { handler := http.NewServeMux() handler.Handle("/healthz", http.HandlerFunc(healthcheckHandler)) - - graphqlProxy := newReverseProxy(c.HttpAddress, getPort(c, portOffsetGraphQLServer)) - handler.Handle("/graphql", graphqlProxy) - - dispatcherProxy := newReverseProxy(c.HttpAddress, getPort(c, portOffsetDispatcher)) - handler.Handle("/metrics", dispatcherProxy) - - inspectProxy := newReverseProxy(c.HttpAddress, getPort(c, portOffsetInspectServer)) - handler.Handle("/inspect", inspectProxy) - handler.Handle("/inspect/", inspectProxy) - - if c.FeatureHostMode { - hostProxy := newReverseProxy(c.HttpAddress, getPort(c, 
portOffsetHostRunnerRollups)) - handler.Handle("/rollup/", http.StripPrefix("/rollup", hostProxy)) - } return handler } @@ -38,13 +20,3 @@ func healthcheckHandler(w http.ResponseWriter, r *http.Request) { slog.Debug("Node received a healthcheck request") w.WriteHeader(http.StatusOK) } - -func newReverseProxy(address string, port int) *httputil.ReverseProxy { - urlStr := fmt.Sprintf("http://%v:%v/", address, port) - url, err := url.Parse(urlStr) - if err != nil { - panic(fmt.Sprintf("failed to parse url: %v", err)) - } - proxy := httputil.NewSingleHostReverseProxy(url) - return proxy -} diff --git a/internal/node/services.go b/internal/node/services.go index 892c3db16..dcf0a9793 100644 --- a/internal/node/services.go +++ b/internal/node/services.go @@ -17,25 +17,11 @@ type portOffset = int const ( portOffsetProxy = iota - portOffsetAdvanceRunner portOffsetAuthorityClaimer - portOffsetDispatcher - portOffsetGraphQLServer - portOffsetGraphQLHealthcheck - portOffsetHostRunnerHealthcheck - portOffsetHostRunnerRollups - portOffsetIndexer - portOffsetInspectServer - portOffsetInspectHealthcheck portOffsetRedis - portOffsetServerManager - portOffsetStateServer ) -const ( - localhost = "127.0.0.1" - serverManagerSessionId = "default_session_id" -) +const localhost = "127.0.0.1" // Get the port of the given service. func getPort(c config.NodeConfig, offset portOffset) int { @@ -68,37 +54,6 @@ func getRustLog(c config.NodeConfig, rustModule string) string { } } -func newAdvanceRunner(c config.NodeConfig, workDir string) services.CommandService { - var s services.CommandService - s.Name = "advance-runner" - s.HealthcheckPort = getPort(c, portOffsetAdvanceRunner) - s.Path = "cartesi-rollups-advance-runner" - s.Env = append(s.Env, "LOG_ENABLE_TIMESTAMP=false") - s.Env = append(s.Env, "LOG_ENABLE_COLOR=false") - s.Env = append(s.Env, getRustLog(c, "advance_runner")) - s.Env = append(s.Env, fmt.Sprintf("SERVER_MANAGER_ENDPOINT=http://%v:%v", - localhost, getPort(c, portOffsetServerManager))) - s.Env = append(s.Env, fmt.Sprintf("SESSION_ID=%v", serverManagerSessionId)) - s.Env = append(s.Env, fmt.Sprintf("REDIS_ENDPOINT=%v", getRedisEndpoint(c))) - s.Env = append(s.Env, fmt.Sprintf("CHAIN_ID=%v", c.BlockchainID)) - s.Env = append(s.Env, fmt.Sprintf("DAPP_CONTRACT_ADDRESS=%v", - c.ContractsApplicationAddress)) - s.Env = append(s.Env, fmt.Sprintf("PROVIDER_HTTP_ENDPOINT=%v", - c.BlockchainHttpEndpoint.Value)) - s.Env = append(s.Env, fmt.Sprintf("ADVANCE_RUNNER_HEALTHCHECK_PORT=%v", - getPort(c, portOffsetAdvanceRunner))) - s.Env = append(s.Env, fmt.Sprintf("READER_MODE=%v", c.FeatureDisableClaimer)) - if c.FeatureHostMode || c.FeatureDisableMachineHashCheck { - s.Env = append(s.Env, "SNAPSHOT_VALIDATION_ENABLED=false") - } - if !c.FeatureHostMode { - s.Env = append(s.Env, fmt.Sprintf("MACHINE_SNAPSHOT_PATH=%v", c.SnapshotDir)) - } - s.Env = append(s.Env, os.Environ()...) 
- s.WorkDir = workDir - return s -} - func newAuthorityClaimer(c config.NodeConfig, workDir string) services.CommandService { var s services.CommandService s.Name = "authority-claimer" @@ -141,113 +96,6 @@ func newAuthorityClaimer(c config.NodeConfig, workDir string) services.CommandSe return s } -func newDispatcher(c config.NodeConfig, workDir string) services.CommandService { - var s services.CommandService - s.Name = "dispatcher" - s.HealthcheckPort = getPort(c, portOffsetDispatcher) - s.Path = "cartesi-rollups-dispatcher" - s.Env = append(s.Env, "LOG_ENABLE_TIMESTAMP=false") - s.Env = append(s.Env, "LOG_ENABLE_COLOR=false") - s.Env = append(s.Env, getRustLog(c, "dispatcher")) - s.Env = append(s.Env, fmt.Sprintf("SC_GRPC_ENDPOINT=http://%v:%v", localhost, - getPort(c, portOffsetStateServer))) - s.Env = append(s.Env, fmt.Sprintf("SC_DEFAULT_CONFIRMATIONS=%v", - c.BlockchainFinalityOffset)) - s.Env = append(s.Env, fmt.Sprintf("REDIS_ENDPOINT=%v", getRedisEndpoint(c))) - s.Env = append(s.Env, fmt.Sprintf("DAPP_ADDRESS=%v", c.ContractsApplicationAddress)) - s.Env = append(s.Env, fmt.Sprintf("INPUT_BOX_DEPLOYMENT_BLOCK_NUMBER=%v", - c.ContractsInputBoxDeploymentBlockNumber)) - s.Env = append(s.Env, fmt.Sprintf("HISTORY_ADDRESS=%v", c.ContractsHistoryAddress)) - s.Env = append(s.Env, fmt.Sprintf("AUTHORITY_ADDRESS=%v", c.ContractsAuthorityAddress)) - s.Env = append(s.Env, fmt.Sprintf("INPUT_BOX_ADDRESS=%v", c.ContractsInputBoxAddress)) - s.Env = append(s.Env, fmt.Sprintf("RD_EPOCH_LENGTH=%v", c.RollupsEpochLength)) - s.Env = append(s.Env, fmt.Sprintf("CHAIN_ID=%v", c.BlockchainID)) - s.Env = append(s.Env, fmt.Sprintf("DISPATCHER_HTTP_SERVER_PORT=%v", - getPort(c, portOffsetDispatcher))) - s.Env = append(s.Env, os.Environ()...) - s.WorkDir = workDir - return s -} - -func newGraphQLServer(c config.NodeConfig, workDir string) services.CommandService { - var s services.CommandService - s.Name = "graphql-server" - s.HealthcheckPort = getPort(c, portOffsetGraphQLHealthcheck) - s.Path = "cartesi-rollups-graphql-server" - s.Env = append(s.Env, "LOG_ENABLE_TIMESTAMP=false") - s.Env = append(s.Env, "LOG_ENABLE_COLOR=false") - s.Env = append(s.Env, getRustLog(c, "graphql_server")) - s.Env = append(s.Env, fmt.Sprintf("POSTGRES_ENDPOINT=%v", c.PostgresEndpoint.Value)) - s.Env = append(s.Env, fmt.Sprintf("GRAPHQL_HOST=%v", localhost)) - s.Env = append(s.Env, fmt.Sprintf("GRAPHQL_PORT=%v", getPort(c, portOffsetGraphQLServer))) - s.Env = append(s.Env, fmt.Sprintf("GRAPHQL_HEALTHCHECK_PORT=%v", - getPort(c, portOffsetGraphQLHealthcheck))) - s.Env = append(s.Env, os.Environ()...) 
- s.WorkDir = workDir - return s -} - -func newHostRunner(c config.NodeConfig, workDir string) services.CommandService { - var s services.CommandService - s.Name = "host-runner" - s.HealthcheckPort = getPort(c, portOffsetHostRunnerHealthcheck) - s.Path = "cartesi-rollups-host-runner" - s.Env = append(s.Env, "LOG_ENABLE_TIMESTAMP=false") - s.Env = append(s.Env, "LOG_ENABLE_COLOR=false") - s.Env = append(s.Env, getRustLog(c, "host_runner")) - s.Env = append(s.Env, fmt.Sprintf("GRPC_SERVER_MANAGER_ADDRESS=%v", localhost)) - s.Env = append(s.Env, fmt.Sprintf("GRPC_SERVER_MANAGER_PORT=%v", - getPort(c, portOffsetServerManager))) - s.Env = append(s.Env, fmt.Sprintf("HTTP_ROLLUP_SERVER_ADDRESS=%v", localhost)) - s.Env = append(s.Env, fmt.Sprintf("HTTP_ROLLUP_SERVER_PORT=%v", - getPort(c, portOffsetHostRunnerRollups))) - s.Env = append(s.Env, fmt.Sprintf("HOST_RUNNER_HEALTHCHECK_PORT=%v", - getPort(c, portOffsetHostRunnerHealthcheck))) - s.Env = append(s.Env, os.Environ()...) - s.WorkDir = workDir - return s -} - -func newIndexer(c config.NodeConfig, workdir string) services.CommandService { - var s services.CommandService - s.Name = "indexer" - s.HealthcheckPort = getPort(c, portOffsetIndexer) - s.Path = "cartesi-rollups-indexer" - s.Env = append(s.Env, "LOG_ENABLE_TIMESTAMP=false") - s.Env = append(s.Env, "LOG_ENABLE_COLOR=false") - s.Env = append(s.Env, getRustLog(c, "indexer")) - s.Env = append(s.Env, fmt.Sprintf("POSTGRES_ENDPOINT=%v", c.PostgresEndpoint.Value)) - s.Env = append(s.Env, fmt.Sprintf("CHAIN_ID=%v", c.BlockchainID)) - s.Env = append(s.Env, fmt.Sprintf("DAPP_CONTRACT_ADDRESS=%v", - c.ContractsApplicationAddress)) - s.Env = append(s.Env, fmt.Sprintf("REDIS_ENDPOINT=%v", getRedisEndpoint(c))) - s.Env = append(s.Env, fmt.Sprintf("INDEXER_HEALTHCHECK_PORT=%v", - getPort(c, portOffsetIndexer))) - s.Env = append(s.Env, os.Environ()...) - s.WorkDir = workdir - return s -} - -func newInspectServer(c config.NodeConfig, workDir string) services.CommandService { - var s services.CommandService - s.Name = "inspect-server" - s.HealthcheckPort = getPort(c, portOffsetInspectHealthcheck) - s.Path = "cartesi-rollups-inspect-server" - s.Env = append(s.Env, "LOG_ENABLE_TIMESTAMP=false") - s.Env = append(s.Env, "LOG_ENABLE_COLOR=false") - s.Env = append(s.Env, getRustLog(c, "inspect_server")) - s.Env = append(s.Env, fmt.Sprintf("INSPECT_SERVER_ADDRESS=%v:%v", localhost, - getPort(c, portOffsetInspectServer))) - s.Env = append(s.Env, fmt.Sprintf("SERVER_MANAGER_ADDRESS=%v:%v", localhost, - getPort(c, portOffsetServerManager))) - s.Env = append(s.Env, fmt.Sprintf("SESSION_ID=%v", serverManagerSessionId)) - s.Env = append(s.Env, fmt.Sprintf("INSPECT_SERVER_HEALTHCHECK_PORT=%v", - getPort(c, portOffsetInspectHealthcheck))) - s.Env = append(s.Env, os.Environ()...) 
- s.WorkDir = workDir - return s -} - func newRedis(c config.NodeConfig, workDir string) services.CommandService { var s services.CommandService s.Name = "redis" @@ -262,48 +110,6 @@ func newRedis(c config.NodeConfig, workDir string) services.CommandService { return s } -func newServerManager(c config.NodeConfig, workDir string) services.ServerManager { - var s services.ServerManager - s.Name = "server-manager" - s.HealthcheckPort = getPort(c, portOffsetServerManager) - s.Path = "server-manager" - s.Args = append(s.Args, - fmt.Sprintf("--manager-address=%v:%v", localhost, getPort(c, portOffsetServerManager))) - s.Env = append(s.Env, "REMOTE_CARTESI_MACHINE_LOG_LEVEL=info") - if c.LogLevel == slog.LevelDebug { - s.Env = append(s.Env, "SERVER_MANAGER_LOG_LEVEL=info") - } else { - s.Env = append(s.Env, "SERVER_MANAGER_LOG_LEVEL=warning") - } - s.Env = append(s.Env, os.Environ()...) - s.BypassLog = c.ExperimentalServerManagerBypassLog - s.WorkDir = workDir - return s -} - -func newStateServer(c config.NodeConfig, workDir string) services.CommandService { - var s services.CommandService - s.Name = "state-server" - s.HealthcheckPort = getPort(c, portOffsetStateServer) - s.Path = "cartesi-rollups-state-server" - s.Env = append(s.Env, "LOG_ENABLE_TIMESTAMP=false") - s.Env = append(s.Env, "LOG_ENABLE_COLOR=false") - s.Env = append(s.Env, getRustLog(c, "state_server")) - s.Env = append(s.Env, "SF_CONCURRENT_EVENTS_FETCH=1") - s.Env = append(s.Env, fmt.Sprintf("SF_GENESIS_BLOCK=%v", - c.ContractsInputBoxDeploymentBlockNumber)) - s.Env = append(s.Env, fmt.Sprintf("SF_SAFETY_MARGIN=%v", c.BlockchainFinalityOffset)) - s.Env = append(s.Env, fmt.Sprintf("BH_WS_ENDPOINT=%v", c.BlockchainWsEndpoint.Value)) - s.Env = append(s.Env, fmt.Sprintf("BH_HTTP_ENDPOINT=%v", - c.BlockchainHttpEndpoint.Value)) - s.Env = append(s.Env, fmt.Sprintf("BLOCKCHAIN_BLOCK_TIMEOUT=%v", c.BlockchainBlockTimeout)) - s.Env = append(s.Env, fmt.Sprintf("SS_SERVER_ADDRESS=%v:%v", localhost, - getPort(c, portOffsetStateServer))) - s.Env = append(s.Env, os.Environ()...) 
- s.WorkDir = workDir - return s -} - func newSupervisorService(c config.NodeConfig, workDir string) services.SupervisorService { var s []services.Service @@ -312,28 +118,11 @@ func newSupervisorService(c config.NodeConfig, workDir string) services.Supervis s = append(s, newRedis(c, workDir)) } - // add services without dependencies - s = append(s, newGraphQLServer(c, workDir)) - s = append(s, newIndexer(c, workDir)) - s = append(s, newStateServer(c, workDir)) - - // start either the server manager or host runner - if c.FeatureHostMode { - s = append(s, newHostRunner(c, workDir)) - } else { - s = append(s, newServerManager(c, workDir)) - } - // enable claimer if reader mode and sunodo validator mode are disabled if !c.FeatureDisableClaimer && !c.ExperimentalSunodoValidatorEnabled { s = append(s, newAuthorityClaimer(c, workDir)) } - // add services with dependencies - s = append(s, newAdvanceRunner(c, workDir)) // Depends on the server-manager/host-runner - s = append(s, newDispatcher(c, workDir)) // Depends on the state server - s = append(s, newInspectServer(c, workDir)) // Depends on the server-manager/host-runner - s = append(s, newHttpService(c)) supervisor := services.SupervisorService{ diff --git a/internal/services/server-manager.go b/internal/services/server-manager.go deleted file mode 100644 index 3ff61ac76..000000000 --- a/internal/services/server-manager.go +++ /dev/null @@ -1,140 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -package services - -import ( - "context" - "fmt" - "log/slog" - "net" - "os" - "os/exec" - "strconv" - "strings" - "syscall" - "time" -) - -// ServerManager is a variation of CommandService used to manually stop -// the orphaned cartesi-machines left after server-manager exits. -// For more information, check https://github.com/cartesi/server-manager/issues/18 -type ServerManager struct { - // Name that identifies the service. - Name string - - // Port used to verify if the service is ready. - HealthcheckPort int - - // Path to the service binary. - Path string - - // Args to the service binary. - Args []string - - // Environment variables. - Env []string - - // Bypass the log and write directly to stdout/stderr. - BypassLog bool - - // Working Directory - WorkDir string -} - -const waitDelay = 200 * time.Millisecond - -func (s ServerManager) Start(ctx context.Context, ready chan<- struct{}) error { - cmd := exec.CommandContext(ctx, s.Path, s.Args...) 
- cmd.Env = s.Env - if s.WorkDir != "" { - cmd.Dir = s.WorkDir - } - if s.BypassLog { - cmd.Stderr = os.Stderr - cmd.Stdout = os.Stdout - } else { - cmd.Stderr = newLineWriter(commandLogger{s.Name}) - cmd.Stdout = newLineWriter(commandLogger{s.Name}) - } - // Without a delay, cmd.Wait() will block forever waiting for the I/O pipes - // to be closed - cmd.WaitDelay = waitDelay - cmd.Cancel = func() error { - err := killChildProcesses(cmd.Process.Pid) - if err != nil { - slog.Warn("Failed to kill child processes", "service", s, "error", err) - } - err = cmd.Process.Signal(syscall.SIGTERM) - if err != nil { - slog.Warn("Failed to send SIGTERM", "service", s, "error", err) - } - return err - } - - go s.pollTcp(ctx, ready) - err := cmd.Run() - - if ctx.Err() != nil { - return ctx.Err() - } - return err -} - -// Blocks until the service is ready or the context is canceled -func (s ServerManager) pollTcp(ctx context.Context, ready chan<- struct{}) { - ctx, cancel := context.WithCancel(ctx) - defer cancel() - for { - conn, err := net.Dial("tcp", fmt.Sprintf("0.0.0.0:%v", s.HealthcheckPort)) - if err == nil { - slog.Debug("Service is ready", "service", s) - conn.Close() - ready <- struct{}{} - return - } - select { - case <-ctx.Done(): - return - case <-time.After(DefaultPollInterval): - } - } -} - -func (s ServerManager) String() string { - return s.Name -} - -// Kills all child processes spawned by pid -func killChildProcesses(pid int) error { - children, err := getChildrenPid(pid) - if err != nil { - return fmt.Errorf("failed to get child processes. %v", err) - } - for _, child := range children { - err = syscall.Kill(child, syscall.SIGKILL) - if err != nil { - return fmt.Errorf("failed to kill child process: %v. %v\n", child, err) - } - } - return nil -} - -// Returns a list of processes whose parent is ppid -func getChildrenPid(ppid int) ([]int, error) { - output, err := exec.Command("pgrep", "-P", fmt.Sprint(ppid)).CombinedOutput() - if err != nil { - return nil, fmt.Errorf("failed to exec pgrep: %v: %v", err, string(output)) - } - - var children []int - pids := strings.Split(strings.TrimSpace(string(output)), "\n") - for _, pid := range pids { - childPid, err := strconv.Atoi(pid) - if err != nil { - return nil, fmt.Errorf("failed to parse pid: %v", err) - } - children = append(children, childPid) - } - return children, nil -} diff --git a/offchain/.rustfmt.toml b/offchain/.rustfmt.toml deleted file mode 100644 index 0c2ca5c32..000000000 --- a/offchain/.rustfmt.toml +++ /dev/null @@ -1,2 +0,0 @@ -edition = "2018" -max_width = 80 diff --git a/offchain/Cargo.toml b/offchain/Cargo.toml deleted file mode 100644 index 3a36faef9..000000000 --- a/offchain/Cargo.toml +++ /dev/null @@ -1,96 +0,0 @@ -[workspace] -resolver = "2" -members = [ - "advance-runner", - "authority-claimer", - "contracts", - "data", - "dispatcher", - "graphql-server", - "grpc-interfaces", - "host-runner", - "http-health-check", - "http-server", - "indexer", - "inspect-server", - "log", - "redacted", - "rollups-events", - "rollups-http-client", - "state-server", - "test-fixtures", - "types", -] - -[workspace.package] -version = "1.4.0" -license = "Apache-2.0" -edition = "2021" - -# This list is sorted alphabetically. 
-[workspace.dependencies] -actix-cors = "0.7" -actix-web = "4.5" -anyhow = "1.0" -async-trait = "0.1" -awc = "3.4" -axum = "0.7" -backoff = "0.4" -base64 = "0.22" -built = "0.7" -byteorder = "1.5" -clap = "4.5" -diesel = "2.1" -diesel_migrations = "2.1" -env_logger = "0.11" -ethabi = "18.0" -eth-block-history = "0.9" -eth-state-client-lib = "0.9" -eth-state-fold-types = "0.9" -eth-state-fold = "0.9" -eth-state-server-lib = "0.9" -eth-tx-manager = "0.10" -ethers = "1.0" -ethers-signers = "1.0" -futures = "0.3" -futures-util = "0.3" -hex = "0.4" -hyper = "0.14" -im = "15" -json = "0.12" -juniper = "0.15" -log = "0.4" -mockall = "0.12" -prometheus-client = "0.22" -prost = "0.11" -rand = "0.8" -redis = "0.25" -regex = "1" -reqwest = "0.12" -rusoto_core = "0.48" -rusoto_kms = "0.48" -rusoto_sts = "0.48" -serde = "1" -serde_json = "1" -serial_test = "3.0" -sha3 = "0.10" -snafu = "0.8" -tempfile = "3.10" -testcontainers = "0.14" -test-log = "0.2" -tokio = "1" -tokio-stream = "0.1" -toml = "0.8" -tonic = "0.9" -tonic-build = "0.9" -tonic-health = "0.9" -tracing = "0.1" -tracing-actix-web = "0.7" -tracing-subscriber = "0.3" -tracing-test = "0.2" -url = "2" -users = "0.11" -uuid = "1.8" - -[profile.release] -strip = true # Automatically strip symbols from the binary. diff --git a/offchain/advance-runner/Cargo.toml b/offchain/advance-runner/Cargo.toml deleted file mode 100644 index 52cf7c038..000000000 --- a/offchain/advance-runner/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "advance-runner" -edition.workspace = true -license.workspace = true -version.workspace = true - -[[bin]] -name = "cartesi-rollups-advance-runner" -path = "src/main.rs" - -[dependencies] -grpc-interfaces = { path = "../grpc-interfaces" } -http-health-check = { path = "../http-health-check" } -log = { path = "../log" } -rollups-events = { path = "../rollups-events" } - -backoff = { workspace = true, features = ["tokio"] } -clap = { workspace = true, features = ["derive", "env"] } -hex.workspace = true -sha3 = { workspace = true, features = ["std"] } -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } -tonic.workspace = true -tracing.workspace = true -uuid = { workspace = true, features = ["v4"] } - -[dev-dependencies] -test-fixtures = { path = "../test-fixtures" } - -env_logger.workspace = true -rand.workspace = true -tempfile.workspace = true -test-log = { workspace = true, features = ["trace"] } -testcontainers.workspace = true -tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/offchain/advance-runner/README.md b/offchain/advance-runner/README.md deleted file mode 100644 index 7e226210f..000000000 --- a/offchain/advance-runner/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Advance Runner - -This service consumes rollups input events from the broker and uses them to advance the server-manager state. -When the epoch finishes, the advance-runner gets the claim from the server-manager and produces the rollups claim event. 
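
For illustration (not part of this patch): the input/claim flow the README describes, as a stub sketch. The enum below is a simplified stand-in for rollups_events::RollupsData; the real implementation is the broker.rs and runner.rs code removed below.

    // Stub sketch of the advance-runner loop; simplified stand-in types.
    #[derive(Debug)]
    enum RollupsData {
        AdvanceStateInput(Vec<u8>),
        FinishEpoch,
    }

    fn main() {
        let events = vec![
            RollupsData::AdvanceStateInput(vec![0xde, 0xad]),
            RollupsData::AdvanceStateInput(vec![0xbe, 0xef]),
            RollupsData::FinishEpoch,
        ];
        let (mut epoch, mut inputs_in_epoch) = (0u64, 0u64);
        for event in events {
            match event {
                RollupsData::AdvanceStateInput(payload) => {
                    // the real service forwards this to the server-manager
                    inputs_in_epoch += 1;
                    println!("epoch {epoch}: advanced state with {payload:02x?}");
                }
                RollupsData::FinishEpoch => {
                    // the real service fetches the epoch claim from the
                    // server-manager and produces a rollups claim event
                    println!("epoch {epoch}: claim covers {inputs_in_epoch} inputs");
                    epoch += 1;
                    inputs_in_epoch = 0;
                }
            }
        }
    }
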
diff --git a/offchain/advance-runner/src/broker.rs b/offchain/advance-runner/src/broker.rs
deleted file mode 100644
index 1993b6e19..000000000
--- a/offchain/advance-runner/src/broker.rs
+++ /dev/null
@@ -1,303 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use rollups_events::{
-    Broker, BrokerConfig, BrokerError, DAppMetadata, RollupsClaim,
-    RollupsClaimsStream, RollupsInput, RollupsInputsStream, RollupsOutput,
-    RollupsOutputsStream, INITIAL_ID,
-};
-use snafu::{ResultExt, Snafu};
-
-#[derive(Debug, Snafu)]
-pub enum BrokerFacadeError {
-    #[snafu(display("broker internal error"))]
-    BrokerInternalError { source: BrokerError },
-
-    #[snafu(display("failed to consume input event"))]
-    ConsumeError { source: BrokerError },
-
-    #[snafu(display(
-        "failed to find finish epoch input event epoch={}",
-        epoch
-    ))]
-    FindFinishEpochInputError { epoch: u64 },
-
-    #[snafu(display("processed event not found in broker"))]
-    ProcessedEventNotFound {},
-
-    #[snafu(display(
-        "parent id doesn't match expected={} got={}",
-        expected,
-        got
-    ))]
-    ParentIdMismatchError { expected: String, got: String },
-}
-
-pub type Result<T> = std::result::Result<T, BrokerFacadeError>;
-
-pub struct BrokerFacade {
-    client: Broker,
-    inputs_stream: RollupsInputsStream,
-    outputs_stream: RollupsOutputsStream,
-    claims_stream: RollupsClaimsStream,
-    reader_mode: bool,
-    last_id: String,
-}
-
-impl BrokerFacade {
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn new(
-        config: BrokerConfig,
-        dapp_metadata: DAppMetadata,
-        reader_mode: bool,
-    ) -> Result<Self> {
-        tracing::trace!(?config, "connecting to broker");
-        let client = Broker::new(config).await.context(BrokerInternalSnafu)?;
-        let inputs_stream = RollupsInputsStream::new(&dapp_metadata);
-        let outputs_stream = RollupsOutputsStream::new(&dapp_metadata);
-        let claims_stream = RollupsClaimsStream::new(dapp_metadata.chain_id);
-        Ok(Self {
-            client,
-            inputs_stream,
-            outputs_stream,
-            claims_stream,
-            reader_mode,
-            last_id: INITIAL_ID.to_owned(),
-        })
-    }
-
-    /// Consume rollups input event
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn consume_input(&mut self) -> Result<RollupsInput> {
-        tracing::trace!(self.last_id, "consuming rollups input event");
-        let event = self
-            .client
-            .consume_blocking(&self.inputs_stream, &self.last_id)
-            .await
-            .context(BrokerInternalSnafu)?;
-        if event.payload.parent_id != self.last_id {
-            Err(BrokerFacadeError::ParentIdMismatchError {
-                expected: self.last_id.to_owned(),
-                got: event.payload.parent_id,
-            })
-        } else {
-            self.last_id = event.id;
-            Ok(event.payload)
-        }
-    }
-
-    /// Produce the rollups claim if it isn't in the stream yet
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn produce_rollups_claim(
-        &mut self,
-        rollups_claim: RollupsClaim,
-    ) -> Result<()> {
-        if self.reader_mode {
-            return Ok(());
-        }
-
-        tracing::trace!(rollups_claim.epoch_index,
-            ?rollups_claim.epoch_hash,
-            "producing rollups claim"
-        );
-
-        let result = self
-            .client
-            .peek_latest(&self.claims_stream)
-            .await
-            .context(BrokerInternalSnafu)?;
-
-        let claim_produced = match result {
-            Some(event) => {
-                tracing::trace!(?event, "got last claim produced");
-                rollups_claim.epoch_index <= event.payload.epoch_index
-            }
-            None => {
-                tracing::trace!("no claims in the stream");
-                false
-            }
-        };
-
-        if !claim_produced {
-            self.client
-                .produce(&self.claims_stream, rollups_claim)
-                .await
-                .context(BrokerInternalSnafu)?;
-        }
-
-        Ok(())
-    }
-
-    /// Produce outputs to the rollups-outputs stream
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn produce_outputs(
-        &mut self,
-        outputs: Vec<RollupsOutput>,
-    ) -> Result<()> {
-        tracing::trace!(?outputs, "producing rollups outputs");
-
-        for output in outputs {
-            self.client
-                .produce(&self.outputs_stream, output)
-                .await
-                .context(BrokerInternalSnafu)?;
-        }
-
-        Ok(())
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-    use backoff::ExponentialBackoff;
-    use rollups_events::{
-        Address, DAppMetadata, Hash, InputMetadata, Payload,
-        RollupsAdvanceStateInput, RollupsData, ADDRESS_SIZE, HASH_SIZE,
-    };
-    use test_fixtures::BrokerFixture;
-    use testcontainers::clients::Cli;
-
-    struct TestState<'d> {
-        fixture: BrokerFixture<'d>,
-        facade: BrokerFacade,
-    }
-
-    impl TestState<'_> {
-        async fn setup(docker: &Cli) -> TestState<'_> {
-            let fixture = BrokerFixture::setup(docker).await;
-            let backoff = ExponentialBackoff::default();
-            let dapp_metadata = DAppMetadata {
-                chain_id: fixture.chain_id(),
-                dapp_address: fixture.dapp_address().to_owned(),
-            };
-            let config = BrokerConfig {
-                redis_endpoint: fixture.redis_endpoint().to_owned(),
-                consume_timeout: 10,
-                backoff,
-            };
-            let facade = BrokerFacade::new(config, dapp_metadata, false)
-                .await
-                .expect("failed to create broker facade");
-            TestState { fixture, facade }
-        }
-    }
-
-    #[test_log::test(tokio::test)]
-    async fn test_it_consumes_inputs() {
-        let docker = Cli::default();
-        let mut state = TestState::setup(&docker).await;
-        let inputs = vec![
-            RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-                metadata: InputMetadata {
-                    epoch_index: 0,
-                    input_index: 0,
-                    ..Default::default()
-                },
-                payload: Payload::new(vec![0, 0]),
-                tx_hash: Hash::default(),
-            }),
-            RollupsData::FinishEpoch {},
-            RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-                metadata: InputMetadata {
-                    epoch_index: 1,
-                    input_index: 1,
-                    ..Default::default()
-                },
-                payload: Payload::new(vec![1, 1]),
-                tx_hash: Hash::default(),
-            }),
-        ];
-        let mut ids = Vec::new();
-        for input in inputs.iter() {
-            ids.push(state.fixture.produce_input_event(input.clone()).await);
-        }
-        assert_eq!(
-            state.facade.consume_input().await.unwrap(),
-            RollupsInput {
-                parent_id: INITIAL_ID.to_owned(),
-                epoch_index: 0,
-                inputs_sent_count: 1,
-                data: inputs[0].clone(),
-            },
-        );
-        assert_eq!(
-            state.facade.consume_input().await.unwrap(),
-            RollupsInput {
-                parent_id: ids[0].clone(),
-                epoch_index: 0,
-                inputs_sent_count: 1,
-                data: inputs[1].clone(),
-            },
-        );
-        assert_eq!(
-            state.facade.consume_input().await.unwrap(),
-            RollupsInput {
-                parent_id: ids[1].clone(),
-                epoch_index: 1,
-                inputs_sent_count: 2,
-                data: inputs[2].clone(),
-            },
-        );
-    }
-
-    #[test_log::test(tokio::test)]
-    async fn test_it_does_not_produce_claim_when_it_was_already_produced() {
-        let docker = Cli::default();
-        let mut state = TestState::setup(&docker).await;
-        let rollups_claim = RollupsClaim {
-            dapp_address: Address::new([0xa0; ADDRESS_SIZE]),
-            epoch_index: 0,
-            epoch_hash: Hash::new([0xb0; HASH_SIZE]),
-            first_index: 0,
-            last_index: 6,
-        };
-        state
-            .fixture
-            .produce_rollups_claim(rollups_claim.clone())
-            .await;
-        state
-            .facade
-            .produce_rollups_claim(rollups_claim.clone())
-            .await
-            .unwrap();
-        assert_eq!(
-            state.fixture.consume_all_claims().await,
-            vec![rollups_claim]
-        );
-    }
-
-    #[test_log::test(tokio::test)]
-    async fn test_it_produces_claims() {
-        let docker = Cli::default();
-        let mut state = TestState::setup(&docker).await;
-        let rollups_claim0 = RollupsClaim {
-            dapp_address:
Address::new([0xa0; ADDRESS_SIZE]), - epoch_index: 0, - epoch_hash: Hash::new([0xb0; HASH_SIZE]), - first_index: 0, - last_index: 0, - }; - let rollups_claim1 = RollupsClaim { - dapp_address: Address::new([0xa1; ADDRESS_SIZE]), - epoch_index: 1, - epoch_hash: Hash::new([0xb1; HASH_SIZE]), - first_index: 1, - last_index: 1, - }; - state - .facade - .produce_rollups_claim(rollups_claim0.clone()) - .await - .unwrap(); - state - .facade - .produce_rollups_claim(rollups_claim1.clone()) - .await - .unwrap(); - assert_eq!( - state.fixture.consume_all_claims().await, - vec![rollups_claim0, rollups_claim1] - ); - } -} diff --git a/offchain/advance-runner/src/config.rs b/offchain/advance-runner/src/config.rs deleted file mode 100644 index 3d16e4f4f..000000000 --- a/offchain/advance-runner/src/config.rs +++ /dev/null @@ -1,85 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; -use std::time::Duration; - -use crate::server_manager::ServerManagerCLIConfig; -pub use crate::server_manager::ServerManagerConfig; -use log::{LogConfig, LogEnvCliConfig}; -pub use rollups_events::{ - BrokerCLIConfig, BrokerConfig, DAppMetadata, DAppMetadataCLIConfig, -}; - -#[derive(Debug, Clone)] -pub struct AdvanceRunnerConfig { - pub server_manager_config: ServerManagerConfig, - pub broker_config: BrokerConfig, - pub dapp_metadata: DAppMetadata, - pub log_config: LogConfig, - pub backoff_max_elapsed_duration: Duration, - pub healthcheck_port: u16, - pub reader_mode: bool, -} - -impl AdvanceRunnerConfig { - pub fn parse() -> Self { - let cli_config = CLIConfig::parse(); - let broker_config = cli_config.broker_cli_config.into(); - let dapp_metadata: DAppMetadata = - cli_config.dapp_metadata_cli_config.into(); - let server_manager_config = - ServerManagerConfig::parse_from_cli(cli_config.sm_cli_config); - - let log_config = LogConfig::initialize(cli_config.log_cli_config); - - let backoff_max_elapsed_duration = - Duration::from_millis(cli_config.backoff_max_elapsed_duration); - - let healthcheck_port = cli_config.healthcheck_port; - - let reader_mode = cli_config.reader_mode; - - Self { - server_manager_config, - broker_config, - dapp_metadata, - log_config, - backoff_max_elapsed_duration, - healthcheck_port, - reader_mode, - } - } -} - -#[derive(Parser)] -#[command(name = "advance_runner_config")] -#[command(about = "Configuration for advance-runner")] -struct CLIConfig { - #[command(flatten)] - sm_cli_config: ServerManagerCLIConfig, - - #[command(flatten)] - broker_cli_config: BrokerCLIConfig, - - #[command(flatten)] - dapp_metadata_cli_config: DAppMetadataCLIConfig, - - #[command(flatten)] - pub log_cli_config: LogEnvCliConfig, - - /// The max elapsed time for backoff in ms - #[arg(long, env, default_value = "120000")] - backoff_max_elapsed_duration: u64, - - /// Port of health check - #[arg( - long, - env = "ADVANCE_RUNNER_HEALTHCHECK_PORT", - default_value_t = 8080 - )] - pub healthcheck_port: u16, - - #[arg(long, env)] - reader_mode: bool, -} diff --git a/offchain/advance-runner/src/error.rs b/offchain/advance-runner/src/error.rs deleted file mode 100644 index e8d0cd4e6..000000000 --- a/offchain/advance-runner/src/error.rs +++ /dev/null @@ -1,26 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use snafu::Snafu; - -use crate::{broker, runner, server_manager}; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -pub enum AdvanceRunnerError { - 
#[snafu(display("health check error"))]
-    HealthCheckError {
-        source: http_health_check::HealthCheckError,
-    },
-
-    #[snafu(display("server manager error"))]
-    ServerManagerError {
-        source: server_manager::ServerManagerError,
-    },
-
-    #[snafu(display("broker error"))]
-    BrokerError { source: broker::BrokerFacadeError },
-
-    #[snafu(display("runner error"))]
-    RunnerError { source: runner::RunnerError },
-}
diff --git a/offchain/advance-runner/src/lib.rs b/offchain/advance-runner/src/lib.rs
deleted file mode 100644
index b1a2366d9..000000000
--- a/offchain/advance-runner/src/lib.rs
+++ /dev/null
@@ -1,66 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use backoff::ExponentialBackoffBuilder;
-use broker::BrokerFacade;
-use config::AdvanceRunnerConfig;
-use runner::Runner;
-use server_manager::ServerManagerFacade;
-use snafu::ResultExt;
-
-pub use broker::BrokerFacadeError;
-pub use error::AdvanceRunnerError;
-pub use runner::RunnerError;
-
-mod broker;
-pub mod config;
-mod error;
-pub mod runner;
-mod server_manager;
-
-#[tracing::instrument(level = "trace", skip_all)]
-pub async fn run(
-    config: AdvanceRunnerConfig,
-) -> Result<(), AdvanceRunnerError> {
-    let health_handle = http_health_check::start(config.healthcheck_port);
-    let advance_runner_handle = start_advance_runner(config);
-    tokio::select! {
-        ret = health_handle => {
-            ret.context(error::HealthCheckSnafu)
-        }
-        ret = advance_runner_handle => {
-            ret
-        }
-    }
-}
-
-#[tracing::instrument(level = "trace", skip_all)]
-async fn start_advance_runner(
-    config: AdvanceRunnerConfig,
-) -> Result<(), AdvanceRunnerError> {
-    let backoff = ExponentialBackoffBuilder::new()
-        .with_max_elapsed_time(Some(config.backoff_max_elapsed_duration))
-        .build();
-
-    let server_manager = ServerManagerFacade::new(
-        config.dapp_metadata.dapp_address.clone(),
-        config.server_manager_config,
-        backoff,
-    )
-    .await
-    .context(error::ServerManagerSnafu)?;
-    tracing::trace!("connected to the server-manager");
-
-    let broker = BrokerFacade::new(
-        config.broker_config,
-        config.dapp_metadata,
-        config.reader_mode,
-    )
-    .await
-    .context(error::BrokerSnafu)?;
-    tracing::trace!("connected to the broker");
-
-    Runner::start(server_manager, broker)
-        .await
-        .context(error::RunnerSnafu)
-}
diff --git a/offchain/advance-runner/src/main.rs b/offchain/advance-runner/src/main.rs
deleted file mode 100644
index 60826dd3f..000000000
--- a/offchain/advance-runner/src/main.rs
+++ /dev/null
@@ -1,15 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use advance_runner::config::AdvanceRunnerConfig;
-
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let config = AdvanceRunnerConfig::parse();
-
-    log::configure(&config.log_config);
-
-    log::log_service_start(&config, "Advance Runner");
-
-    advance_runner::run(config).await.map_err(|e| e.into())
-}
diff --git a/offchain/advance-runner/src/runner.rs b/offchain/advance-runner/src/runner.rs
deleted file mode 100644
index ccd09479b..000000000
--- a/offchain/advance-runner/src/runner.rs
+++ /dev/null
@@ -1,146 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use rollups_events::{InputMetadata, RollupsData};
-use snafu::{ResultExt, Snafu};
-
-use crate::broker::{BrokerFacade, BrokerFacadeError};
-use crate::server_manager::{ServerManagerError, ServerManagerFacade};
-
-#[derive(Debug, Snafu)]
-pub enum RunnerError {
-    #[snafu(display("failed to send advance-state input to server-manager"))]
-    AdvanceError { source: ServerManagerError },
-
-    #[snafu(display("failed to finish epoch in server-manager"))]
-    FinishEpochError { source: ServerManagerError },
-
-    #[snafu(display("failed to get epoch claim from server-manager"))]
-    GetEpochClaimError { source: ServerManagerError },
-
-    #[snafu(display("failed to find finish epoch input event"))]
-    FindFinishEpochInputError { source: BrokerFacadeError },
-
-    #[snafu(display("failed to consume input from broker"))]
-    ConsumeInputError { source: BrokerFacadeError },
-
-    #[snafu(display("failed to check whether claim was produced"))]
-    PeekClaimError { source: BrokerFacadeError },
-
-    #[snafu(display("failed to produce claim in broker"))]
-    ProduceClaimError { source: BrokerFacadeError },
-
-    #[snafu(display("failed to produce outputs in broker"))]
-    ProduceOutputsError { source: BrokerFacadeError },
-}
-
-type Result<T> = std::result::Result<T, RunnerError>;
-
-pub struct Runner {
-    server_manager: ServerManagerFacade,
-    broker: BrokerFacade,
-}
-
-impl Runner {
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn start(
-        server_manager: ServerManagerFacade,
-        broker: BrokerFacade,
-    ) -> Result<()> {
-        let mut runner = Self {
-            server_manager,
-            broker,
-        };
-
-        tracing::info!("starting runner main loop");
-        loop {
-            let event = runner
-                .broker
-                .consume_input()
-                .await
-                .context(ConsumeInputSnafu)?;
-            tracing::info!(?event, "consumed input event");
-
-            match event.data {
-                RollupsData::AdvanceStateInput(input) => {
-                    runner
-                        .handle_advance(
-                            event.epoch_index,
-                            event.inputs_sent_count,
-                            input.metadata,
-                            input.payload.into_inner(),
-                        )
-                        .await?;
-                }
-                RollupsData::FinishEpoch {} => {
-                    runner.handle_finish(event.epoch_index).await?;
-                }
-            }
-            tracing::info!("waiting for the next input event");
-        }
-    }
-
-    #[tracing::instrument(level = "trace", skip_all)]
-    async fn handle_advance(
-        &mut self,
-        epoch_index: u64,
-        inputs_sent_count: u64,
-        input_metadata: InputMetadata,
-        input_payload: Vec<u8>,
-    ) -> Result<()> {
-        tracing::trace!("handling advance state");
-
-        let input_index = inputs_sent_count - 1;
-        let outputs = self
-            .server_manager
-            .advance_state(
-                epoch_index,
-                input_index,
-                input_metadata,
-                input_payload,
-            )
-            .await
-            .context(AdvanceSnafu)?;
-        tracing::trace!("advance state sent to server-manager");
-
-        self.broker
-            .produce_outputs(outputs)
-            .await
-            .context(ProduceOutputsSnafu)?;
-        tracing::trace!("produced outputs in broker");
-
-        Ok(())
-    }
-
-    #[tracing::instrument(level = "trace", skip_all)]
-    async fn handle_finish(&mut self, epoch_index: u64) -> Result<()> {
-        tracing::trace!("handling finish");
-
-        let result = self.server_manager.finish_epoch(epoch_index).await;
-        tracing::trace!("finished epoch in server-manager");
-
-        match result {
-            Ok((rollups_claim, proofs)) => {
-                self.broker
-                    .produce_outputs(proofs)
-                    .await
-                    .context(ProduceOutputsSnafu)?;
-                tracing::trace!("produced outputs in broker");
-
-                self.broker
-                    .produce_rollups_claim(rollups_claim)
-                    .await
-                    .context(ProduceClaimSnafu)?;
-                tracing::info!("produced epoch claim");
-            }
-            Err(source) => {
-                if let ServerManagerError::EmptyEpochError { ..
} = source { - tracing::warn!("{}", source) - } else { - return Err(RunnerError::FinishEpochError { source }); - } - } - } - Ok(()) - } -} diff --git a/offchain/advance-runner/src/server_manager/claim.rs b/offchain/advance-runner/src/server_manager/claim.rs deleted file mode 100644 index a9d958213..000000000 --- a/offchain/advance-runner/src/server_manager/claim.rs +++ /dev/null @@ -1,45 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use rollups_events::{Hash, HASH_SIZE}; -use sha3::{Digest, Keccak256}; - -pub fn compute_epoch_hash( - machine_state_hash: &Hash, - vouchers_metadata_hash: &Hash, - notices_metadata_hash: &Hash, -) -> Hash { - let mut hasher = Keccak256::new(); - hasher.update(machine_state_hash.inner()); - hasher.update(vouchers_metadata_hash.inner()); - hasher.update(notices_metadata_hash.inner()); - let data: [u8; HASH_SIZE] = hasher.finalize().into(); - Hash::new(data) -} - -#[cfg(test)] -mod tests { - use super::{compute_epoch_hash, Hash}; - - #[test_log::test] - fn test_claim_hash() { - let hash = Hash::new( - hex::decode( - "ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff", - ) - .unwrap() - .try_into() - .unwrap() - ); - let claim = compute_epoch_hash(&hash, &hash, &hash); - let expected = Hash::new( - hex::decode( - "8590bbc3ea43e28e8624fb1a2d59aaca701a5517e08511c4a14d9037de6f6086", - ) - .unwrap() - .try_into() - .unwrap() - ); - assert_eq!(expected, claim); - } -} diff --git a/offchain/advance-runner/src/server_manager/config.rs b/offchain/advance-runner/src/server_manager/config.rs deleted file mode 100644 index 642e7daea..000000000 --- a/offchain/advance-runner/src/server_manager/config.rs +++ /dev/null @@ -1,149 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; - -use grpc_interfaces::cartesi_machine::{ - ConcurrencyConfig, MachineRuntimeConfig, -}; -use grpc_interfaces::cartesi_server_manager::{CyclesConfig, DeadlineConfig}; - -#[derive(Debug, Clone)] -pub struct ServerManagerConfig { - pub server_manager_endpoint: String, - pub machine_snapshot_path: String, - pub max_decoding_message_size: usize, - pub session_id: String, - pub pending_inputs_sleep_duration: u64, - pub pending_inputs_max_retries: u64, - pub runtime_config: MachineRuntimeConfig, - pub deadline_config: DeadlineConfig, - pub cycles_config: CyclesConfig, -} - -impl ServerManagerConfig { - pub fn parse_from_cli(cli_config: ServerManagerCLIConfig) -> Self { - let runtime_config = MachineRuntimeConfig { - concurrency: Some(ConcurrencyConfig { - update_merkle_tree: cli_config - .sm_concurrency_update_merkle_tree, - }), - }; - - let deadline_config = DeadlineConfig { - checkin: cli_config.sm_deadline_checkin, - advance_state: cli_config.sm_deadline_advance_state, - advance_state_increment: cli_config - .sm_deadline_advance_state_increment, - inspect_state: cli_config.sm_deadline_inspect_state, - inspect_state_increment: cli_config - .sm_deadline_inspect_state_increment, - machine: cli_config.sm_deadline_machine, - store: cli_config.sm_deadline_store, - fast: cli_config.sm_deadline_fast, - }; - - let cycles_config = CyclesConfig { - max_advance_state: cli_config.sm_cycles_max_advance_state, - advance_state_increment: cli_config - .sm_cycles_advance_state_increment, - max_inspect_state: cli_config.sm_cycles_max_inspect_state, - inspect_state_increment: cli_config - .sm_cycles_inspect_state_increment, - }; - - Self { - 
server_manager_endpoint: cli_config.server_manager_endpoint, - machine_snapshot_path: cli_config.machine_snapshot_path, - max_decoding_message_size: cli_config.max_decoding_message_size, - session_id: cli_config.session_id, - pending_inputs_sleep_duration: cli_config - .sm_pending_inputs_sleep_duration, - pending_inputs_max_retries: cli_config - .sm_pending_inputs_max_retries, - runtime_config, - deadline_config, - cycles_config, - } - } -} - -#[derive(Debug, Parser)] -#[command(name = "server_manager_config")] -pub struct ServerManagerCLIConfig { - /// Server-manager gRPC endpoint - #[arg(long, env, default_value = "http://127.0.0.1:5001")] - pub server_manager_endpoint: String, - - /// Path to the machine snapshot - #[arg(long, env, default_value = "")] - pub machine_snapshot_path: String, - - /// Maximum size of a decoded message - #[arg(long, env, default_value_t = 100 * 1024 * 1024)] - pub max_decoding_message_size: usize, - - /// Server-manager session id - #[arg(long, env, default_value = "default_rollups_id")] - pub session_id: String, - - /// Sleep duration while polling for server-manager pending inputs (in millis) - #[arg(long, env, default_value_t = 1000)] - pub sm_pending_inputs_sleep_duration: u64, - - /// Max number of retries while polling server-manager for pending inputs - #[arg(long, env, default_value_t = 600)] - pub sm_pending_inputs_max_retries: u64, - - /// Defines the number of threads to use while calculating the merkle tree - #[arg(long, env, default_value_t = 0)] - pub sm_concurrency_update_merkle_tree: u64, - - /// Deadline for receiving checkin from spawned machine server - #[arg(long, env, default_value_t = 5 * 1000)] - pub sm_deadline_checkin: u64, - - /// Deadline for advancing the state - #[arg(long, env, default_value_t = 1000 * 60 * 3)] - pub sm_deadline_advance_state: u64, - - /// Deadline for each increment when advancing state - #[arg(long, env, default_value_t = 1000 * 10)] - pub sm_deadline_advance_state_increment: u64, - - /// Deadline for inspecting state - #[arg(long, env, default_value_t = 1000 * 60 * 3)] - pub sm_deadline_inspect_state: u64, - - /// Deadline for each increment when inspecting state - #[arg(long, env, default_value_t = 1000 * 10)] - pub sm_deadline_inspect_state_increment: u64, - - /// Deadline for instantiating a machine - #[arg(long, env, default_value_t = 1000 * 60 * 5)] - pub sm_deadline_machine: u64, - - /// Deadline for storing a machine - #[arg(long, env, default_value_t = 1000 * 60 * 3)] - pub sm_deadline_store: u64, - - /// Deadline for quick machine server tasks - #[arg(long, env, default_value_t = 1000 * 5)] - pub sm_deadline_fast: u64, - - /// Maximum number of cycles that processing the input in an AdvanceState can take - #[arg(long, env, default_value_t = u64::MAX >> 2)] - pub sm_cycles_max_advance_state: u64, - - /// Number of cycles in each increment to processing an input - #[arg(long, env, default_value_t = 1 << 22)] - pub sm_cycles_advance_state_increment: u64, - - /// Maximum number of cycles that processing the query in an InspectState can take - #[arg(long, env, default_value_t = u64::MAX >> 2)] - pub sm_cycles_max_inspect_state: u64, - - /// Number of cycles in each increment to processing a query - #[arg(long, env, default_value_t = 1 << 22)] - pub sm_cycles_inspect_state_increment: u64, -} diff --git a/offchain/advance-runner/src/server_manager/conversions.rs b/offchain/advance-runner/src/server_manager/conversions.rs deleted file mode 100644 index bd05231ec..000000000 --- 
a/offchain/advance-runner/src/server_manager/conversions.rs
+++ /dev/null
@@ -1,135 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-//! This module contains functions to convert from gRPC types to
-//! rollups-events types
-use grpc_interfaces::cartesi_machine::Hash;
-use grpc_interfaces::cartesi_server_manager::{
-    Address, CompletionStatus, OutputEnum, OutputValidityProof, Proof,
-};
-use rollups_events::{
-    Address as RollupsAddress, Hash as RollupsHash, Payload,
-    RollupsCompletionStatus, RollupsOutputEnum, RollupsOutputValidityProof,
-    RollupsProof, ADDRESS_SIZE, HASH_SIZE,
-};
-
-use super::error::ServerManagerError;
-
-/// Try to get the field from an option, otherwise return an error
-macro_rules! get_field {
-    ($field: expr) => {
-        match $field {
-            Some(value) => value,
-            None => {
-                return Err(
-                    super::error::ServerManagerError::MissingFieldError {
-                        name: stringify!($field).to_owned(),
-                    },
-                );
-            }
-        }
-    };
-}
-
-// Export the get_field macro for other modules to use
-pub(super) use get_field;
-
-/// Convert gRPC completion status to broker equivalent
-pub fn convert_completion_status(
-    status: CompletionStatus,
-) -> RollupsCompletionStatus {
-    match status {
-        CompletionStatus::Accepted => RollupsCompletionStatus::Accepted,
-        CompletionStatus::Rejected => RollupsCompletionStatus::Rejected,
-        CompletionStatus::Exception => RollupsCompletionStatus::Exception,
-        CompletionStatus::MachineHalted => {
-            RollupsCompletionStatus::MachineHalted
-        }
-        CompletionStatus::CycleLimitExceeded => {
-            RollupsCompletionStatus::CycleLimitExceeded
-        }
-        CompletionStatus::TimeLimitExceeded => {
-            RollupsCompletionStatus::TimeLimitExceeded
-        }
-        CompletionStatus::PayloadLengthLimitExceeded => {
-            RollupsCompletionStatus::PayloadLengthLimitExceeded
-        }
-    }
-}
-
-/// Convert gRPC hash to broker equivalent
-pub fn convert_hash(hash: Hash) -> Result<RollupsHash, ServerManagerError> {
-    hash.data.try_into().map(RollupsHash::new).map_err(|data| {
-        ServerManagerError::WrongArraySizeError {
-            name: "hash".to_owned(),
-            expected: HASH_SIZE,
-            got: data.len(),
-        }
-    })
-}
-
-/// Convert gRPC address to broker equivalent
-pub fn convert_address(
-    address: Address,
-) -> Result<RollupsAddress, ServerManagerError> {
-    address
-        .data
-        .try_into()
-        .map(RollupsAddress::new)
-        .map_err(|data| ServerManagerError::WrongArraySizeError {
-            name: "address".to_owned(),
-            expected: ADDRESS_SIZE,
-            got: data.len(),
-        })
-}
-
-/// Convert from gRPC proof to broker equivalent
-pub fn convert_proof(
-    proof: Proof,
-) -> Result<RollupsProof, ServerManagerError> {
-    let output_enum = match proof.output_enum() {
-        OutputEnum::Voucher => RollupsOutputEnum::Voucher,
-        OutputEnum::Notice => RollupsOutputEnum::Notice,
-    };
-    let validity = convert_validity(get_field!(proof.validity))?;
-    let context = Payload::new(proof.context);
-    Ok(RollupsProof {
-        input_index: proof.input_index,
-        output_index: proof.output_index,
-        output_enum,
-        validity,
-        context,
-    })
-}
-
-/// Convert from gRPC output validity proof to broker equivalent
-fn convert_validity(
-    validity: OutputValidityProof,
-) -> Result<RollupsOutputValidityProof, ServerManagerError> {
-    let output_hashes_root_hash =
-        convert_hash(get_field!(validity.output_hashes_root_hash))?;
-    let vouchers_epoch_root_hash =
-        convert_hash(get_field!(validity.vouchers_epoch_root_hash))?;
-    let notices_epoch_root_hash =
-        convert_hash(get_field!(validity.notices_epoch_root_hash))?;
-    let machine_state_hash =
-        convert_hash(get_field!(validity.machine_state_hash))?;
-    let output_hash_in_output_hashes_siblings = validity
-        .output_hash_in_output_hashes_siblings
-        .into_iter()
-        .map(convert_hash)
-        .collect::<Result<Vec<_>, ServerManagerError>>()?;
-    let output_hashes_in_epoch_siblings = validity
-        .output_hashes_in_epoch_siblings
-        .into_iter()
-        .map(convert_hash)
-        .collect::<Result<Vec<_>, ServerManagerError>>()?;
-    Ok(RollupsOutputValidityProof {
-        input_index_within_epoch: validity.input_index_within_epoch,
-        output_index_within_input: validity.output_index_within_input,
-        output_hashes_root_hash,
-        vouchers_epoch_root_hash,
-        notices_epoch_root_hash,
-        machine_state_hash,
-        output_hash_in_output_hashes_siblings,
-        output_hashes_in_epoch_siblings,
-    })
-}
diff --git a/offchain/advance-runner/src/server_manager/error.rs b/offchain/advance-runner/src/server_manager/error.rs
deleted file mode 100644
index d3b5bdf68..000000000
--- a/offchain/advance-runner/src/server_manager/error.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use snafu::Snafu;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(super)))]
-#[allow(clippy::enum_variant_names)]
-pub enum ServerManagerError {
-    #[snafu(display("failed to connect to server-manager"))]
-    ConnectionError { source: tonic::transport::Error },
-
-    #[snafu(display(
-        "failed to call {} with request-id {}",
-        method,
-        request_id
-    ))]
-    MethodCallError {
-        method: String,
-        request_id: String,
-        source: tonic::Status,
-    },
-
-    #[snafu(display("maximum number of retries exceeded"))]
-    PendingInputsExceededError {},
-
-    #[snafu(display("missing field {}", name))]
-    MissingFieldError { name: String },
-
-    #[snafu(display(
-        "array of wrong size for {} type, expected {} but got {}",
-        name,
-        expected,
-        got
-    ))]
-    WrongArraySizeError {
-        name: String,
-        expected: usize,
-        got: usize,
-    },
-
-    #[snafu(display("missing processed input in get epoch status"))]
-    MissingProcessedInputError {},
-
-    #[snafu(display(
-        "invalid last processed input index, expected {} but got {}",
-        expected,
-        got
-    ))]
-    InvalidProcessedInputError { expected: u64, got: u64 },
-
-    #[snafu(display(
-        "can't generate claim for epoch {} because it has no inputs",
-        epoch_index
-    ))]
-    EmptyEpochError { epoch_index: u64 },
-}
diff --git a/offchain/advance-runner/src/server_manager/facade.rs b/offchain/advance-runner/src/server_manager/facade.rs
deleted file mode 100644
index c37375635..000000000
--- a/offchain/advance-runner/src/server_manager/facade.rs
+++ /dev/null
@@ -1,371 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use backoff::{future::retry, Error, ExponentialBackoff};
-use rollups_events::{
-    InputMetadata as RollupsInputMetadata, Payload, RollupsAdvanceResult,
-    RollupsClaim, RollupsNotice, RollupsOutput, RollupsReport, RollupsVoucher,
-};
-use snafu::{OptionExt, ResultExt};
-use tonic::{transport::Channel, Request};
-use uuid::Uuid;
-
-use grpc_interfaces::cartesi_machine::Void;
-use grpc_interfaces::cartesi_server_manager::server_manager_client::ServerManagerClient;
-use grpc_interfaces::cartesi_server_manager::{
-    processed_input::ProcessedInputOneOf, Address, AdvanceStateRequest,
-    EndSessionRequest, FinishEpochRequest, GetEpochStatusRequest,
-    GetSessionStatusRequest, InputMetadata, ProcessedInput,
-    StartSessionRequest,
-};
-
-use super::claim::compute_epoch_hash;
-use super::config::ServerManagerConfig;
-use super::conversions::{
-    convert_address, convert_completion_status, convert_hash, convert_proof,
-    get_field,
-};
-use super::error::{
-    ConnectionSnafu, EmptyEpochSnafu, InvalidProcessedInputSnafu,
-    ServerManagerError,
-};
-
-/// Call the grpc method, passing a unique request-id, with retry
-macro_rules! grpc_call {
-    ($self: ident, $method: ident, $request: expr) => {
-        retry($self.backoff.clone(), || async {
-            let request_id = Uuid::new_v4().to_string();
-            let request = $request;
-
-            tracing::trace!(
-                request_id,
-                method = stringify!($method),
-                ?request,
-                "calling grpc"
-            );
-
-            let mut grpc_request = Request::new(request);
-            grpc_request
-                .metadata_mut()
-                .insert("request-id", request_id.parse().unwrap());
-
-            let response = $self.client.clone().$method(grpc_request).await;
-
-            tracing::trace!(
-                request_id,
-                method = stringify!($method),
-                ?response,
-                "got grpc response",
-            );
-
-            response.map(|v| v.into_inner()).map_err(|status| {
-                let err_type = match status.code() {
-                    tonic::Code::InvalidArgument => Error::Permanent,
-                    tonic::Code::NotFound => Error::Permanent,
-                    tonic::Code::AlreadyExists => Error::Permanent,
-                    tonic::Code::FailedPrecondition => Error::Permanent,
-                    tonic::Code::OutOfRange => Error::Permanent,
-                    tonic::Code::Unimplemented => Error::Permanent,
-                    tonic::Code::DataLoss => Error::Permanent,
-                    _ => Error::transient,
-                };
-                err_type(ServerManagerError::MethodCallError {
-                    source: status,
-                    method: stringify!($method).to_owned(),
-                    request_id,
-                })
-            })
-        })
-        .await
-    };
-}
-
-pub type Result<T> = std::result::Result<T, ServerManagerError>;
-
-pub struct ServerManagerFacade {
-    dapp_address: rollups_events::Address,
-    client: ServerManagerClient<Channel>,
-    config: ServerManagerConfig,
-    backoff: ExponentialBackoff,
-}
-
-impl ServerManagerFacade {
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn new(
-        dapp_address: rollups_events::Address,
-        config: ServerManagerConfig,
-        backoff: ExponentialBackoff,
-    ) -> Result<Self> {
-        tracing::trace!(?config, "connecting to server manager");
-
-        let client = retry(backoff.clone(), || async {
-            ServerManagerClient::connect(config.server_manager_endpoint.clone())
-                .await
-                .map_err(Error::transient)
-        })
-        .await
-        .context(ConnectionSnafu)?
-        .max_decoding_message_size(config.max_decoding_message_size);
-
-        let mut sm_facade = Self {
-            dapp_address,
-            client,
-            config,
-            backoff,
-        };
-
-        // If the session exists, delete it before creating a new one
-        let response = grpc_call!(sm_facade, get_status, Void {})?;
-        if response.session_id.contains(&sm_facade.config.session_id) {
-            tracing::warn!("deleting previous server-manager session");
-            let session_status = grpc_call!(
-                sm_facade,
-                get_session_status,
-                GetSessionStatusRequest {
-                    session_id: sm_facade.config.session_id.clone(),
-                }
-            )?;
-            let active_epoch_index = session_status.active_epoch_index;
-            let processed_input_count_within_epoch = sm_facade
-                .wait_for_pending_inputs(active_epoch_index)
-                .await?
-                .len()
-                as u64;
-            grpc_call!(
-                sm_facade,
-                finish_epoch,
-                FinishEpochRequest {
-                    session_id: sm_facade.config.session_id.clone(),
-                    active_epoch_index,
-                    processed_input_count_within_epoch,
-                    storage_directory: "".to_string(),
-                }
-            )?;
-            grpc_call!(
-                sm_facade,
-                end_session,
-                EndSessionRequest {
-                    session_id: sm_facade.config.session_id.clone(),
-                }
-            )?;
-        }
-
-        tracing::trace!("starting server-manager session");
-
-        grpc_call!(sm_facade, start_session, {
-            StartSessionRequest {
-                session_id: sm_facade.config.session_id.clone(),
-                machine_directory: sm_facade
-                    .config
-                    .machine_snapshot_path
-                    .clone(),
-                runtime: Some(sm_facade.config.runtime_config.clone()),
-                active_epoch_index: 0,
-                processed_input_count: 0,
-                server_cycles: Some(sm_facade.config.cycles_config.clone()),
-                server_deadline: Some(sm_facade.config.deadline_config.clone()),
-            }
-        })?;
-
-        Ok(sm_facade)
-    }
-
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn advance_state(
-        &mut self,
-        active_epoch_index: u64,
-        current_input_index: u64,
-        input_metadata: RollupsInputMetadata,
-        input_payload: Vec<u8>,
-    ) -> Result<Vec<RollupsOutput>> {
-        tracing::trace!("sending advance-state input to server-manager");
-
-        grpc_call!(self, advance_state, {
-            let input_metadata = InputMetadata {
-                msg_sender: Some(Address {
-                    data: (*input_metadata.msg_sender.inner()).into(),
-                }),
-                block_number: input_metadata.block_number,
-                timestamp: input_metadata.timestamp,
-                epoch_index: input_metadata.epoch_index,
-                input_index: input_metadata.input_index,
-            };
-            AdvanceStateRequest {
-                session_id: self.config.session_id.to_owned(),
-                active_epoch_index,
-                current_input_index,
-                input_metadata: Some(input_metadata),
-                input_payload: input_payload.clone(),
-            }
-        })?;
-
-        tracing::trace!("waiting until the input is processed");
-
-        let processed_input = self
-            .wait_for_pending_inputs(active_epoch_index)
-            .await?
-            .pop()
-            .ok_or(ServerManagerError::MissingProcessedInputError {})?;
-        snafu::ensure!(
-            processed_input.input_index == current_input_index,
-            InvalidProcessedInputSnafu {
-                expected: current_input_index,
-                got: processed_input.input_index,
-            }
-        );
-
-        tracing::trace!("getting outputs");
-
-        let mut outputs = vec![];
-
-        let status = convert_completion_status(processed_input.status());
-        let result = RollupsAdvanceResult {
-            input_index: current_input_index,
-            status,
-        };
-        outputs.push(RollupsOutput::AdvanceResult(result));
-
-        for (index, report) in processed_input.reports.into_iter().enumerate() {
-            let report = RollupsReport {
-                index: index as u64,
-                input_index: current_input_index,
-                payload: Payload::new(report.payload),
-            };
-            outputs.push(RollupsOutput::Report(report));
-        }
-
-        if let Some(one_of) = processed_input.processed_input_one_of {
-            match one_of {
-                ProcessedInputOneOf::AcceptedData(data) => {
-                    for (index, voucher) in
-                        data.vouchers.into_iter().enumerate()
-                    {
-                        let destination =
-                            convert_address(get_field!(voucher.destination))?;
-                        let voucher = RollupsVoucher {
-                            index: index as u64,
-                            input_index: current_input_index,
-                            payload: Payload::new(voucher.payload),
-                            destination,
-                        };
-                        outputs.push(RollupsOutput::Voucher(voucher));
-                    }
-                    for (index, notice) in data.notices.into_iter().enumerate()
-                    {
-                        let notice = RollupsNotice {
-                            index: index as u64,
-                            input_index: current_input_index,
-                            payload: Payload::new(notice.payload),
-                        };
-                        outputs.push(RollupsOutput::Notice(notice));
-                    }
-                }
-                _ => {
-                    tracing::trace!("ignoring input not accepted");
-                }
-            }
-        }
-
-        tracing::trace!(?outputs, "got outputs from epoch status");
-
-        Ok(outputs)
-    }
-
-    /// Send a finish-epoch request to the server-manager
-    /// Return the epoch claim and the proofs
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn finish_epoch(
-        &mut self,
-        epoch_index: u64,
-    ) -> Result<(RollupsClaim, Vec<RollupsOutput>)> {
-        tracing::trace!(epoch_index, "sending finish epoch");
-
-        // Wait for pending inputs before sending a finish request
-        let processed_inputs =
-            self.wait_for_pending_inputs(epoch_index).await?;
-        let processed_input_count_within_epoch = processed_inputs.len() as u64;
-
-        let response = grpc_call!(self, finish_epoch, {
-            FinishEpochRequest {
-                session_id: self.config.session_id.to_owned(),
-                active_epoch_index: epoch_index,
-                processed_input_count_within_epoch,
-                storage_directory: "".to_owned(),
-            }
-        })?;
-
-        // Only try to get first and last after making the finish epoch request
-        let (first_input, last_input) = processed_inputs
-            .first()
-            .zip(processed_inputs.last())
-            .context(EmptyEpochSnafu { epoch_index })?;
-
-        let vouchers_metadata_hash =
-            convert_hash(get_field!(response.vouchers_epoch_root_hash))?;
-        let notices_metadata_hash =
-            convert_hash(get_field!(response.notices_epoch_root_hash))?;
-        let machine_state_hash =
-            convert_hash(get_field!(response.machine_hash))?;
-        let epoch_hash = compute_epoch_hash(
-            &vouchers_metadata_hash,
-            &notices_metadata_hash,
-            &machine_state_hash,
-        );
-        tracing::trace!(?epoch_hash, "computed epoch hash");
-
-        let mut proofs = vec![];
-        for proof in response.proofs {
-            let proof = convert_proof(proof)?;
-            proofs.push(RollupsOutput::Proof(proof));
-        }
-        tracing::trace!(?proofs, "got proofs");
-
-        let rollups_claim = RollupsClaim {
-            dapp_address: self.dapp_address.clone(),
-            epoch_index,
-            epoch_hash,
-            first_index: first_input.input_index as u128,
-            last_index: last_input.input_index as u128,
-        };
-
-        Ok((rollups_claim, proofs))
-    }
-
-    /// Wait until the server-manager processes all pending inputs
-    /// Return the list of processed inputs for the given epoch
-    #[tracing::instrument(level = "trace", skip_all)]
-    async fn wait_for_pending_inputs(
-        &mut self,
-        epoch_index: u64,
-    ) -> Result<Vec<ProcessedInput>> {
-        tracing::trace!(epoch_index, "waiting for pending inputs");
-
-        for _ in 0..self.config.pending_inputs_max_retries {
-            let response = grpc_call!(self, get_epoch_status, {
-                GetEpochStatusRequest {
-                    session_id: self.config.session_id.to_owned(),
-                    epoch_index,
-                }
-            })?;
-            if response.pending_input_count > 0 {
-                let duration = std::time::Duration::from_millis(
-                    self.config.pending_inputs_sleep_duration,
-                );
-                tracing::debug!(
-                    "server-manager has {} pending inputs; sleeping for {} ms",
-                    response.pending_input_count,
-                    duration.as_millis(),
-                );
-                tokio::time::sleep(duration).await;
-            } else {
-                return Ok(response.processed_inputs);
-            }
-        }
-
-        tracing::warn!(
-            "the number of retries while waiting for pending inputs exceeded"
-        );
-
-        Err(ServerManagerError::PendingInputsExceededError {})
-    }
-}
diff --git a/offchain/advance-runner/src/server_manager/mod.rs b/offchain/advance-runner/src/server_manager/mod.rs
deleted file mode 100644
index 6018b4465..000000000
--- a/offchain/advance-runner/src/server_manager/mod.rs
+++ /dev/null
@@ -1,12 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-mod claim;
-mod config;
-mod conversions;
-mod error;
-mod facade;
-
-pub use config::{ServerManagerCLIConfig, ServerManagerConfig};
-pub use error::ServerManagerError;
-pub use facade::ServerManagerFacade;
diff --git a/offchain/advance-runner/tests/fixtures/mod.rs b/offchain/advance-runner/tests/fixtures/mod.rs
deleted file mode 100644
index a5f68af10..000000000
--- a/offchain/advance-runner/tests/fixtures/mod.rs
+++ /dev/null
@@ -1,136 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use advance_runner::config::{
-    AdvanceRunnerConfig, BrokerConfig, DAppMetadata, ServerManagerConfig,
-};
-use advance_runner::AdvanceRunnerError;
-use grpc_interfaces::cartesi_machine::{
-    ConcurrencyConfig, MachineRuntimeConfig,
-};
-use grpc_interfaces::cartesi_server_manager::{CyclesConfig, DeadlineConfig};
-use log::LogConfig;
-use rollups_events::{Address, BrokerEndpoint};
-use std::cell::RefCell;
-use std::time::Duration;
-use tokio::task::JoinHandle;
-
-pub struct AdvanceRunnerFixture {
-    config: AdvanceRunnerConfig,
-    handler: RefCell<Option<JoinHandle<Result<(), AdvanceRunnerError>>>>,
-}
-
-impl AdvanceRunnerFixture {
-    pub async fn setup(
-        server_manager_endpoint: String,
-        session_id: String,
-        redis_endpoint: BrokerEndpoint,
-        chain_id: u64,
-        dapp_address: Address,
-        snapshot_dir: Option<String>,
-    ) -> Self {
-        let runtime_config = MachineRuntimeConfig {
-            concurrency: Some(ConcurrencyConfig {
-                update_merkle_tree: 0,
-            }),
-        };
-
-        let deadline_config = DeadlineConfig {
-            checkin: 1000 * 5,
-            advance_state: 1000 * 60 * 3,
-            advance_state_increment: 1000 * 10,
-            inspect_state: 1000 * 60 * 3,
-            inspect_state_increment: 1000 * 10,
-            machine: 1000 * 60 * 5,
-            store: 1000 * 60 * 3,
-            fast: 1000 * 5,
-        };
-
-        let cycles_config = CyclesConfig {
-            max_advance_state: u64::MAX >> 2,
-            advance_state_increment: 1 << 22,
-            max_inspect_state: u64::MAX >> 2,
-            inspect_state_increment: 1 << 22,
-        };
-
-        let server_manager_config = ServerManagerConfig {
-            server_manager_endpoint,
-            machine_snapshot_path: snapshot_dir.unwrap_or("".to_owned()),
-            max_decoding_message_size: 100 * 1024 * 1024,
-            session_id,
-            pending_inputs_sleep_duration: 1000,
-            pending_inputs_max_retries: 10,
-            runtime_config,
-            deadline_config,
-            cycles_config,
-        };
-
-        let dapp_metadata = DAppMetadata {
-            chain_id,
-            dapp_address: dapp_address.clone(),
-        };
-
-        let broker_config = BrokerConfig {
-            redis_endpoint,
-            consume_timeout: 100,
-            backoff: Default::default(),
-        };
-
-        let backoff_max_elapsed_duration = Duration::from_millis(1);
-
-        let config = AdvanceRunnerConfig {
-            server_manager_config,
-            broker_config,
-            dapp_metadata,
-            backoff_max_elapsed_duration,
-            healthcheck_port: 0,
-            log_config: LogConfig::default(),
-            reader_mode: false,
-        };
-        let handler = RefCell::new(Some(start_advance_runner(config.clone())));
-        Self { config, handler }
-    }
-
-    /// Wait until the advance runner exits with an error
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn wait_err(&self) -> AdvanceRunnerError {
-        tracing::trace!("waiting for advance runner error");
-        let handler = self.handler.replace(None);
-        handler
-            .expect("handler not found")
-            .await
-            .expect("failed to wait for handler")
-            .expect_err("advance runner should exit with an error")
-    }
-
-    /// Abort the current advance runner, wait for it to finish, and start another one
-    #[tracing::instrument(level = "trace", skip_all)]
-    pub async fn restart(&self) {
-        tracing::trace!("restarting advance runner");
-        let handler = self.handler.replace(None).expect("handler not found");
-        handler.abort();
-        handler
-            .await
-            .expect_err("advance runner finished before abort");
-        let new_handler = start_advance_runner(self.config.clone());
-        self.handler.replace(Some(new_handler));
-    }
-}
-
-fn start_advance_runner(
-    config: AdvanceRunnerConfig,
-) -> JoinHandle<Result<(), AdvanceRunnerError>> {
-    tokio::spawn(async move {
-        let output = advance_runner::run(config).await;
-        tracing::error!(?output, "advance_runner exited");
-        output
-    })
-}
-
-impl Drop for AdvanceRunnerFixture {
-    fn drop(&mut self) {
-        if let Some(handler) = self.handler.borrow().as_ref() {
-            handler.abort();
-        }
-    }
-}
diff --git a/offchain/advance-runner/tests/host_integration.rs b/offchain/advance-runner/tests/host_integration.rs
deleted file mode 100644
index 2604c3078..000000000
--- a/offchain/advance-runner/tests/host_integration.rs
+++ /dev/null
@@ -1,297 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use fixtures::AdvanceRunnerFixture;
-use rand::Rng;
-use rollups_events::{
-    Hash, InputMetadata, Payload, RollupsAdvanceStateInput, RollupsClaim,
-    RollupsData, RollupsInput, INITIAL_ID,
-};
-use test_fixtures::{BrokerFixture, EchoDAppFixture, HostServerManagerFixture};
-use testcontainers::clients::Cli;
-
-mod fixtures;
-
-struct TestState<'d> {
-    broker: BrokerFixture<'d>,
-    server_manager: HostServerManagerFixture<'d>,
-    advance_runner: AdvanceRunnerFixture,
-}
-
-impl TestState<'_> {
-    async fn setup(docker: &Cli) -> TestState<'_> {
-        let broker = BrokerFixture::setup(docker).await;
-        let server_manager = HostServerManagerFixture::setup(docker).await;
-
-        let endpoint = server_manager.http_endpoint().to_owned();
-        tokio::spawn(async move {
-            if let Err(e) = EchoDAppFixture::start_echo_dapp(endpoint).await {
-                tracing::error!("error running echo dapp thread: {:?}", e);
-            }
-        });
-
-        let advance_runner = AdvanceRunnerFixture::setup(
-            server_manager.grpc_endpoint().to_owned(),
-            server_manager.session_id().to_owned(),
-            broker.redis_endpoint().to_owned(),
-            broker.chain_id(),
-            broker.dapp_address().to_owned(),
-            None,
-        )
-        .await;
- - TestState { - broker, - server_manager, - advance_runner, - } - } -} - -fn generate_payload() -> Payload { - let len = rand::thread_rng().gen_range(100..200); - let data: Vec = (0..len).map(|_| rand::thread_rng().gen()).collect(); - Payload::new(data) -} - -#[test_log::test(tokio::test)] -async fn start_server_manager_session() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - tracing::info!("checking whether advance_runner created session"); - state.server_manager.assert_session_ready().await; -} - -#[test_log::test(tokio::test)] -async fn send_inputs_to_server_manager() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: usize = 3; - tracing::info!("producing {} inputs", N); - - let payloads: Vec<_> = (0..N).map(|_| generate_payload()).collect(); - for (i, payload) in payloads.iter().enumerate() { - let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: InputMetadata { - input_index: i as u64, - ..Default::default() - }, - payload: payload.clone().into(), - tx_hash: Hash::default(), - }); - state.broker.produce_input_event(data).await; - } - - tracing::info!("waiting until the inputs are processed"); - state.server_manager.assert_session_ready().await; - state - .server_manager - .assert_epoch_status_payloads(0, &payloads) - .await; -} - -#[test_log::test(tokio::test)] -async fn advance_runner_fails_when_inputs_has_wrong_epoch() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - tracing::info!("producing input with wrong epoch index"); - let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: InputMetadata { - input_index: 0, - ..Default::default() - }, - payload: Default::default(), - tx_hash: Hash::default(), - }); - let input = RollupsInput { - parent_id: INITIAL_ID.to_owned(), - epoch_index: 1, - inputs_sent_count: 1, - data, - }; - state.broker.produce_raw_input_event(input).await; - - tracing::info!("waiting for the advance_runner to exit with error"); - let err = state.advance_runner.wait_err().await; - assert!(format!("{:?}", err).contains("incorrect active epoch index")); -} - -#[test_log::test(tokio::test)] -async fn advance_runner_fails_when_inputs_has_wrong_parent_id() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - tracing::info!("producing input with wrong parent id"); - let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: InputMetadata { - input_index: 0, - ..Default::default() - }, - payload: Default::default(), - tx_hash: Hash::default(), - }); - let input = RollupsInput { - parent_id: "invalid".to_owned(), - epoch_index: 0, - inputs_sent_count: 1, - data, - }; - state.broker.produce_raw_input_event(input).await; - - tracing::info!("waiting for the advance_runner to exit with error"); - let err = state.advance_runner.wait_err().await; - assert!(matches!( - err, - advance_runner::AdvanceRunnerError::RunnerError { - source: advance_runner::RunnerError::ConsumeInputError { - source: advance_runner::BrokerFacadeError::ParentIdMismatchError { - expected, - got, - } - } - } if expected == "0".to_owned() && got == "invalid".to_owned() - )); -} - -#[test_log::test(tokio::test)] -async fn advance_runner_generates_claim_after_finishing_epoch() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: usize = 3; - tracing::info!("producing {} finish epoch events", N); - for i in 0..N { - let advance = - 
RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-                metadata: InputMetadata {
-                    input_index: i as u64,
-                    ..Default::default()
-                },
-                payload: generate_payload(),
-                tx_hash: Hash::default(),
-            });
-        let finish = RollupsData::FinishEpoch {};
-        state.broker.produce_input_event(advance).await;
-        state.broker.produce_input_event(finish).await;
-    }
-
-    tracing::info!("waiting until the expected claims are generated");
-    state.server_manager.assert_session_ready().await;
-    let claims = state.broker.consume_n_claims(N).await;
-    // We don't verify the claim hash because it is not the responsibility of the
-    // advance_runner and because it changes every time we update the Cartesi Machine.
-    assert_eq!(claims.len(), N);
-}
-
-#[test_log::test(tokio::test)]
-async fn advance_runner_finishes_epoch_when_the_previous_epoch_has_inputs() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    tracing::info!("finishing epochs with no inputs");
-    state
-        .broker
-        .produce_input_event(RollupsData::FinishEpoch {})
-        .await;
-    state
-        .broker
-        .produce_input_event(RollupsData::FinishEpoch {})
-        .await;
-
-    tracing::info!("waiting until second epoch is finished");
-    state.server_manager.assert_epoch_finished(1).await;
-
-    tracing::info!("checking it didn't produce claims");
-    let claims = state.broker.consume_all_claims().await;
-    assert_eq!(claims.len(), 0);
-}
-
-/// Send an input, a finish epoch, and another input.
-/// After the second input is processed by the server-manager we know
-/// for sure that the advance_runner finished processing the finish epoch.
-/// We can't simply wait for the epoch to be finished because the advance_runner
-/// still does tasks after that.
-async fn finish_epoch_and_wait_for_next_input(state: &TestState<'_>) {
-    tracing::info!("producing input, finish, and another input");
-    let payload = generate_payload();
-    let inputs = vec![
-        RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-            metadata: InputMetadata {
-                input_index: 0,
-                ..Default::default()
-            },
-            payload: Default::default(),
-            tx_hash: Hash::default(),
-        }),
-        RollupsData::FinishEpoch {},
-        RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-            metadata: InputMetadata {
-                input_index: 1,
-                ..Default::default()
-            },
-            payload: payload.clone(),
-            tx_hash: Hash::default(),
-        }),
-    ];
-    for input in inputs {
-        state.broker.produce_input_event(input).await;
-    }
-
-    tracing::info!("waiting until second input is processed");
-    state.server_manager.assert_session_ready().await;
-    state.server_manager.assert_epoch_status(0, 1).await;
-    state
-        .server_manager
-        .assert_epoch_status_payloads(1, &vec![payload])
-        .await;
-}
-
-#[test_log::test(tokio::test)]
-async fn advance_runner_sends_inputs_after_finishing_epoch() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-    finish_epoch_and_wait_for_next_input(&state).await;
-}
-
-#[test_log::test(tokio::test)]
-async fn advance_runner_does_not_generate_duplicate_claim() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    tracing::info!("producing claim");
-    let claim = RollupsClaim::default();
-    state.broker.produce_rollups_claim(claim.clone()).await;
-
-    finish_epoch_and_wait_for_next_input(&state).await;
-
-    tracing::info!("getting all claims");
-    let produced_claims = state.broker.consume_all_claims().await;
-    assert_eq!(produced_claims.len(), 1);
-    assert_eq!(produced_claims[0], claim);
-}
-
-#[test_log::test(tokio::test)]
-async fn
advance_runner_restore_session_after_restart() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - finish_epoch_and_wait_for_next_input(&state).await; - - tracing::info!("restarting advance_runner"); - state.advance_runner.restart().await; - - tracing::info!("producing another input and checking"); - let input = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: InputMetadata { - input_index: 2, - ..Default::default() - }, - payload: generate_payload(), - tx_hash: Hash::default(), - }); - state.broker.produce_input_event(input).await; - state.server_manager.assert_epoch_status(0, 1).await; -} diff --git a/offchain/advance-runner/tests/server_integration.rs b/offchain/advance-runner/tests/server_integration.rs deleted file mode 100644 index 0ef18d97f..000000000 --- a/offchain/advance-runner/tests/server_integration.rs +++ /dev/null @@ -1,296 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use fixtures::AdvanceRunnerFixture; -use rand::Rng; -use rollups_events::{ - Hash, InputMetadata, Payload, RollupsAdvanceStateInput, RollupsClaim, - RollupsData, RollupsInput, INITIAL_ID, -}; -use test_fixtures::{ - BrokerFixture, MachineSnapshotsFixture, ServerManagerFixture, -}; -use testcontainers::clients::Cli; - -mod fixtures; - -struct TestState<'d> { - _snapshots: MachineSnapshotsFixture, - broker: BrokerFixture<'d>, - server_manager: ServerManagerFixture<'d>, - advance_runner: AdvanceRunnerFixture, -} - -impl TestState<'_> { - async fn setup(docker: &Cli) -> TestState<'_> { - let snapshots = MachineSnapshotsFixture::setup(); - let broker = BrokerFixture::setup(docker).await; - let server_manager = - ServerManagerFixture::setup(docker, &snapshots.path()).await; - let advance_runner = AdvanceRunnerFixture::setup( - server_manager.endpoint().to_owned(), - server_manager.session_id().to_owned(), - broker.redis_endpoint().to_owned(), - broker.chain_id(), - broker.dapp_address().to_owned(), - Some(snapshots.path().to_string_lossy().to_string()), - ) - .await; - TestState { - _snapshots: snapshots, - broker, - server_manager, - advance_runner, - } - } -} - -fn generate_payload() -> Payload { - let len = rand::thread_rng().gen_range(100..200); - let data: Vec = (0..len).map(|_| rand::thread_rng().gen()).collect(); - Payload::new(data) -} - -#[test_log::test(tokio::test)] -async fn test_advance_runner_starts_server_manager_session() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - tracing::info!("checking whether advance_runner created session"); - state.server_manager.assert_session_ready().await; -} - -#[test_log::test(tokio::test)] -async fn test_advance_runner_sends_inputs_to_server_manager() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: usize = 3; - tracing::info!("producing {} inputs", N); - let payloads: Vec<_> = (0..N).map(|_| generate_payload()).collect(); - for (i, payload) in payloads.iter().enumerate() { - let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: InputMetadata { - input_index: i as u64, - ..Default::default() - }, - payload: payload.clone().into(), - tx_hash: Hash::default(), - }); - state.broker.produce_input_event(data).await; - } - - tracing::info!("waiting until the inputs are processed"); - state.server_manager.assert_session_ready().await; - state - .server_manager - .assert_epoch_status_payloads(0, &payloads) - .await; -} - 
-#[test_log::test(tokio::test)]
-async fn test_advance_runner_fails_when_inputs_has_wrong_epoch() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    tracing::info!("producing input with wrong epoch index");
-    let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-        metadata: InputMetadata {
-            input_index: 0,
-            ..Default::default()
-        },
-        payload: Default::default(),
-        tx_hash: Hash::default(),
-    });
-    let input = RollupsInput {
-        parent_id: INITIAL_ID.to_owned(),
-        epoch_index: 1,
-        inputs_sent_count: 1,
-        data,
-    };
-    state.broker.produce_raw_input_event(input).await;
-
-    tracing::info!("waiting for the advance_runner to exit with error");
-    let err = state.advance_runner.wait_err().await;
-    assert!(format!("{:?}", err).contains("incorrect active epoch index"));
-}
-
-#[test_log::test(tokio::test)]
-async fn test_advance_runner_fails_when_inputs_has_wrong_parent_id() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    tracing::info!("producing input with wrong parent id");
-    let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-        metadata: InputMetadata {
-            input_index: 0,
-            ..Default::default()
-        },
-        payload: Default::default(),
-        tx_hash: Hash::default(),
-    });
-    let input = RollupsInput {
-        parent_id: "invalid".to_owned(),
-        epoch_index: 0,
-        inputs_sent_count: 1,
-        data,
-    };
-    state.broker.produce_raw_input_event(input).await;
-
-    tracing::info!("waiting for the advance_runner to exit with error");
-    let err = state.advance_runner.wait_err().await;
-    assert!(matches!(
-        err,
-        advance_runner::AdvanceRunnerError::RunnerError {
-            source: advance_runner::RunnerError::ConsumeInputError {
-                source: advance_runner::BrokerFacadeError::ParentIdMismatchError {
-                    expected,
-                    got,
-                }
-            }
-        } if expected == "0".to_owned() && got == "invalid".to_owned()
-    ));
-}
-
-#[test_log::test(tokio::test)]
-async fn test_advance_runner_generates_claim_after_finishing_epoch() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    const N: usize = 3;
-    tracing::info!("producing {} finish epoch events", N);
-    for i in 0..N {
-        let advance =
-            RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-                metadata: InputMetadata {
-                    input_index: i as u64,
-                    ..Default::default()
-                },
-                payload: generate_payload(),
-                tx_hash: Hash::default(),
-            });
-        let finish = RollupsData::FinishEpoch {};
-        state.broker.produce_input_event(advance).await;
-        state.broker.produce_input_event(finish).await;
-    }
-
-    tracing::info!("waiting until the expected claims are generated");
-    state.server_manager.assert_session_ready().await;
-    let claims = state.broker.consume_n_claims(N).await;
-    // We don't verify the claim hash because it is not the responsibility of the
-    // advance_runner and because it changes every time we update the Cartesi Machine.
-    assert_eq!(claims.len(), N);
-}
-
-#[test_log::test(tokio::test)]
-async fn test_advance_runner_finishes_epoch_when_it_has_no_inputs() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    tracing::info!("finishing epochs with no inputs");
-    state
-        .broker
-        .produce_input_event(RollupsData::FinishEpoch {})
-        .await;
-    state
-        .broker
-        .produce_input_event(RollupsData::FinishEpoch {})
-        .await;
-
-    tracing::info!("waiting until second epoch is finished");
-    state.server_manager.assert_epoch_finished(1).await;
-
-    tracing::info!("checking it didn't produce claims");
-    let claims = state.broker.consume_all_claims().await;
-    assert_eq!(claims.len(), 0);
-}
-
-/// Send an input, a finish epoch, and another input.
-/// After the second input is processed by the server-manager we know
-/// for sure that the advance_runner finished processing the finish epoch.
-/// We can't simply wait for the epoch to be finished because the advance_runner
-/// still does tasks after that.
-async fn finish_epoch_and_wait_for_next_input(state: &TestState<'_>) {
-    tracing::info!("producing input, finish, and another input");
-    let payload = generate_payload();
-    let inputs = vec![
-        RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-            metadata: InputMetadata {
-                input_index: 0,
-                ..Default::default()
-            },
-            payload: Default::default(),
-            tx_hash: Hash::default(),
-        }),
-        RollupsData::FinishEpoch {},
-        RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-            metadata: InputMetadata {
-                input_index: 1,
-                ..Default::default()
-            },
-            payload: payload.clone(),
-            tx_hash: Hash::default(),
-        }),
-    ];
-    for input in inputs {
-        state.broker.produce_input_event(input).await;
-    }
-
-    tracing::info!("waiting until second input is processed");
-    state.server_manager.assert_session_ready().await;
-    state.server_manager.assert_epoch_status(0, 1).await;
-    state
-        .server_manager
-        .assert_epoch_status_payloads(1, &vec![payload])
-        .await;
-}
-
-#[test_log::test(tokio::test)]
-async fn test_advance_runner_sends_inputs_after_finishing_epoch() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-    finish_epoch_and_wait_for_next_input(&state).await;
-}
-
-#[test_log::test(tokio::test)]
-async fn test_advance_runner_does_not_generate_duplicate_claim() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    tracing::info!("producing claim");
-    let rollups_claim = RollupsClaim::default();
-    state
-        .broker
-        .produce_rollups_claim(rollups_claim.clone())
-        .await;
-
-    finish_epoch_and_wait_for_next_input(&state).await;
-
-    tracing::info!("getting all claims");
-    let produced_claims = state.broker.consume_all_claims().await;
-    assert_eq!(produced_claims.len(), 1);
-    assert_eq!(produced_claims[0].epoch_hash, rollups_claim.epoch_hash);
-}
-
-#[test_log::test(tokio::test)]
-async fn test_advance_runner_restore_session_after_restart() {
-    let docker = Cli::default();
-    let state = TestState::setup(&docker).await;
-
-    finish_epoch_and_wait_for_next_input(&state).await;
-
-    tracing::info!("restarting advance_runner");
-    state.advance_runner.restart().await;
-
-    tracing::info!("producing another input and checking");
-    let input = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput {
-        metadata: InputMetadata {
-            input_index: 2,
-            ..Default::default()
-        },
-        payload: generate_payload(),
-        tx_hash: Hash::default(),
-    });
-    state.broker.produce_input_event(input).await;
-    state.server_manager.assert_epoch_status(1, 2).await;
-}
diff --git
a/offchain/authority-claimer/Cargo.toml b/offchain/authority-claimer/Cargo.toml deleted file mode 100644 index 0b589d166..000000000 --- a/offchain/authority-claimer/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "authority-claimer" -edition.workspace = true -license.workspace = true -version.workspace = true - -[[bin]] -name = "cartesi-rollups-authority-claimer" -path = "src/main.rs" -test = false - -[dependencies] -contracts = { path = "../contracts" } -http-server = { path = "../http-server" } -log = { path = "../log" } -rollups-events = { path = "../rollups-events" } -types = { path = "../types" } -redacted = { path = "../redacted" } - -async-trait.workspace = true -clap = { workspace = true, features = ["derive", "env"] } -eth-tx-manager.workspace = true -ethabi.workspace = true -ethers-signers = { workspace = true, features = ["aws"] } -ethers.workspace = true -rusoto_core.workspace = true -rusoto_kms.workspace = true -rusoto_sts.workspace = true -serde.workspace = true -serde_json.workspace = true -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -tracing.workspace = true -url.workspace = true - -[dev-dependencies] -test-fixtures = { path = "../test-fixtures" } - -backoff = { workspace = true, features = ["tokio"] } -serial_test.workspace = true -testcontainers.workspace = true -tracing-test = { workspace = true, features = ["no-env-filter"] } diff --git a/offchain/clippy.toml b/offchain/clippy.toml deleted file mode 100644 index f19238120..000000000 --- a/offchain/clippy.toml +++ /dev/null @@ -1,2 +0,0 @@ -# This avoids warning `large_enum_variant` on `pub enum MachineOneof`, built from grpc-interfaces. -enum-variant-size-threshold = 2500 diff --git a/offchain/contracts/Cargo.toml b/offchain/contracts/Cargo.toml deleted file mode 100644 index c5a76781a..000000000 --- a/offchain/contracts/Cargo.toml +++ /dev/null @@ -1,16 +0,0 @@ -[package] -name = "contracts" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -eth-state-fold-types = { workspace = true, features = ["ethers"] } - -[build-dependencies] -eth-state-fold-types = { workspace = true, features = ["ethers"] } -tempfile.workspace = true -snafu.workspace = true - -[package.metadata.cargo-machete] -ignored = ["eth-state-fold-types"] diff --git a/offchain/contracts/README.md b/offchain/contracts/README.md deleted file mode 100644 index 06132e915..000000000 --- a/offchain/contracts/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Contracts - -Library crate for loading a contract ABI. 
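For reference, loading a contract ABI in this style of crate boils down to deserializing the contract's JSON ABI. A minimal sketch, assuming the `ethers` re-export of `ethabi` plus `serde_json`; the one-function ABI below is purely illustrative, while the real ABIs are generated by the Solidity compiler and embedded by the crate's build script:

```rust
use ethers::abi::Abi;

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Illustrative ABI with a single hypothetical function.
    let json = r#"[{
        "type": "function",
        "name": "submitClaim",
        "inputs": [{ "name": "_claim", "type": "bytes32" }],
        "outputs": [],
        "stateMutability": "nonpayable"
    }]"#;

    // Parse the JSON into an `Abi`, then look up the function by name.
    let abi: Abi = serde_json::from_str(json)?;
    assert!(abi.function("submitClaim").is_ok());
    Ok(())
}
```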
diff --git a/offchain/data/Cargo.toml b/offchain/data/Cargo.toml
deleted file mode 100644
index a8b0d86b6..000000000
--- a/offchain/data/Cargo.toml
+++ /dev/null
@@ -1,26 +0,0 @@
-[package]
-name = "rollups-data"
-edition.workspace = true
-license.workspace = true
-version.workspace = true
-
-[dependencies]
-redacted = { path = "../redacted" }
-
-backoff.workspace = true
-base64.workspace = true
-clap = { workspace = true, features = ["derive", "env"] }
-diesel_migrations.workspace = true
-diesel = { workspace = true, features = ["postgres", "r2d2"]}
-snafu.workspace = true
-tracing.workspace = true
-
-[dev-dependencies]
-test-fixtures = { path = "../test-fixtures" }
-
-serial_test.workspace = true
-env_logger.workspace = true
-tempfile.workspace = true
-testcontainers.workspace = true
-test-log = { workspace = true, features = ["trace"] }
-tracing-subscriber = { workspace = true, features = ["env-filter"] }
diff --git a/offchain/data/README.md b/offchain/data/README.md
deleted file mode 100644
index 2d8b5658a..000000000
--- a/offchain/data/README.md
+++ /dev/null
@@ -1,71 +0,0 @@
-# Rollups data
-
-This crate generates the PostgreSQL database schema used to store the rollups data:
-all of its inputs, notices, vouchers, reports and proofs.
-
-## Running PostgreSQL locally
-
-If you want to run PostgreSQL on your machine, you can run the following docker command:
-
-```sh
-docker run --rm --name test-postgres -e POSTGRES_PASSWORD=pw -p 5432:5432 -d postgres:13
-```
-
-## Setup diesel
-
-The database migration requires [diesel](https://diesel.rs/).
-Before installing diesel, you need to install the PostgreSQL development library.
-On Ubuntu, run the following command:
-
-```sh
-sudo apt install libpq-dev
-```
-
-Or, if you are using macOS:
-
-```sh
-brew install libpq &&
-echo 'export PATH="/opt/homebrew/opt/libpq/bin:$PATH"' >> ~/.zshrc
-```
-
-To install diesel, run the following command:
-
-```sh
-cargo install diesel_cli --no-default-features --features postgres
-```
-
-Then, set up the environment variable DATABASE\_URL to reference the local PostgreSQL.
-
-```sh
-echo "export DATABASE_URL=postgres://postgres:pw@localhost:5432/postgres" > .env
-```
-
-## Perform database migrations
-
-Follow the commands below to perform the migration.
-This procedure creates the tables in PostgreSQL and also generates the file `src/schema.rs`.
-
-```sh
-diesel migration run
-```
-
-## Modifying the database schema
-
-To modify the database schema, you should edit the files in the `migrations` dir.
-For more details, please follow the instructions on [the diesel site](https://diesel.rs).
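Besides the `diesel` CLI, migrations like these can also be applied programmatically with `diesel_migrations`. A minimal sketch, assuming diesel 2.x with the `postgres` feature; the exact wiring in the crate may differ:

```rust
use diesel::pg::PgConnection;
use diesel::Connection;
use diesel_migrations::{embed_migrations, EmbeddedMigrations, MigrationHarness};

// Embeds the SQL files from the `migrations` directory at compile time.
const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");

fn main() {
    // Reuses the DATABASE_URL environment variable set up above.
    let url = std::env::var("DATABASE_URL").expect("DATABASE_URL must be set");
    let mut conn =
        PgConnection::establish(&url).expect("failed to connect to PostgreSQL");
    conn.run_pending_migrations(MIGRATIONS)
        .expect("failed to run pending migrations");
}
```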
- -## Test - -To run the automated tests, run the following command: - -```sh -cargo test -``` - -### Manual tests - -If you want to fiddle with the database, you can populate it by running: - -```sh -psql -h localhost -U postgres -d postgres -a -f util/populate.sql -``` diff --git a/offchain/data/build.rs b/offchain/data/build.rs deleted file mode 100644 index 3146146d4..000000000 --- a/offchain/data/build.rs +++ /dev/null @@ -1,6 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -fn main() { - println!("cargo:rerun-if-changed=migrations"); -} diff --git a/offchain/data/diesel.toml b/offchain/data/diesel.toml deleted file mode 100644 index 92267c829..000000000 --- a/offchain/data/diesel.toml +++ /dev/null @@ -1,5 +0,0 @@ -# For documentation on how to configure this file, -# see diesel.rs/guides/configuring-diesel-cli - -[print_schema] -file = "src/schema.rs" diff --git a/offchain/data/migrations/00000000000000_diesel_initial_setup/down.sql b/offchain/data/migrations/00000000000000_diesel_initial_setup/down.sql deleted file mode 100644 index a9f526091..000000000 --- a/offchain/data/migrations/00000000000000_diesel_initial_setup/down.sql +++ /dev/null @@ -1,6 +0,0 @@ --- This file was automatically created by Diesel to setup helper functions --- and other internal bookkeeping. This file is safe to edit, any future --- changes will be added to existing projects as new migrations. - -DROP FUNCTION IF EXISTS diesel_manage_updated_at(_tbl regclass); -DROP FUNCTION IF EXISTS diesel_set_updated_at(); diff --git a/offchain/data/migrations/00000000000000_diesel_initial_setup/up.sql b/offchain/data/migrations/00000000000000_diesel_initial_setup/up.sql deleted file mode 100644 index d68895b1a..000000000 --- a/offchain/data/migrations/00000000000000_diesel_initial_setup/up.sql +++ /dev/null @@ -1,36 +0,0 @@ --- This file was automatically created by Diesel to setup helper functions --- and other internal bookkeeping. This file is safe to edit, any future --- changes will be added to existing projects as new migrations. 
- - - - --- Sets up a trigger for the given table to automatically set a column called --- `updated_at` whenever the row is modified (unless `updated_at` was included --- in the modified columns) --- --- # Example --- --- ```sql --- CREATE TABLE users (id SERIAL PRIMARY KEY, updated_at TIMESTAMP NOT NULL DEFAULT NOW()); --- --- SELECT diesel_manage_updated_at('users'); --- ``` -CREATE OR REPLACE FUNCTION diesel_manage_updated_at(_tbl regclass) RETURNS VOID AS $$ -BEGIN - EXECUTE format('CREATE TRIGGER set_updated_at BEFORE UPDATE ON %s - FOR EACH ROW EXECUTE PROCEDURE diesel_set_updated_at()', _tbl); -END; -$$ LANGUAGE plpgsql; - -CREATE OR REPLACE FUNCTION diesel_set_updated_at() RETURNS trigger AS $$ -BEGIN - IF ( - NEW IS DISTINCT FROM OLD AND - NEW.updated_at IS NOT DISTINCT FROM OLD.updated_at - ) THEN - NEW.updated_at := current_timestamp; - END IF; - RETURN NEW; -END; -$$ LANGUAGE plpgsql; diff --git a/offchain/data/migrations/20230110182039_rollups/down.sql b/offchain/data/migrations/20230110182039_rollups/down.sql deleted file mode 100644 index b17076778..000000000 --- a/offchain/data/migrations/20230110182039_rollups/down.sql +++ /dev/null @@ -1,10 +0,0 @@ --- (c) Cartesi and individual authors (see AUTHORS) --- SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -DROP TABLE "vouchers"; -DROP TABLE "notices"; -DROP TABLE "reports"; -DROP TABLE "proofs"; -DROP TABLE "inputs"; - -DROP TYPE "OutputEnum"; diff --git a/offchain/data/migrations/20230110182039_rollups/up.sql b/offchain/data/migrations/20230110182039_rollups/up.sql deleted file mode 100644 index 200542cae..000000000 --- a/offchain/data/migrations/20230110182039_rollups/up.sql +++ /dev/null @@ -1,60 +0,0 @@ --- (c) Cartesi and individual authors (see AUTHORS) --- SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -CREATE TABLE "inputs" -( - "index" INT NOT NULL, - "msg_sender" BYTEA NOT NULL, - "tx_hash" BYTEA NOT NULL, - "block_number" BIGINT NOT NULL, - "timestamp" TIMESTAMP NOT NULL, - "payload" BYTEA NOT NULL, - CONSTRAINT "inputs_pkey" PRIMARY KEY ("index") -); - -CREATE TABLE "vouchers" -( - "input_index" INT NOT NULL, - "index" INT NOT NULL, - "destination" BYTEA NOT NULL, - "payload" BYTEA NOT NULL, - CONSTRAINT "vouchers_pkey" PRIMARY KEY ("input_index", "index"), - CONSTRAINT "vouchers_input_index_fkey" FOREIGN KEY ("input_index") REFERENCES "inputs"("index") -); - -CREATE TABLE "notices" -( - "input_index" INT NOT NULL, - "index" INT NOT NULL, - "payload" BYTEA NOT NULL, - CONSTRAINT "notices_pkey" PRIMARY KEY ("input_index", "index"), - CONSTRAINT "notices_input_index_fkey" FOREIGN KEY ("input_index") REFERENCES "inputs"("index") -); - -CREATE TABLE "reports" -( - "input_index" INT NOT NULL, - "index" INT NOT NULL, - "payload" BYTEA NOT NULL, - CONSTRAINT "reports_pkey" PRIMARY KEY ("input_index", "index"), - CONSTRAINT "reports_input_index_fkey" FOREIGN KEY ("input_index") REFERENCES "inputs"("index") -); - -CREATE TYPE "OutputEnum" AS ENUM ('voucher', 'notice'); - -CREATE TABLE "proofs" -( - "input_index" INT NOT NULL, - "output_index" INT NOT NULL, - "output_enum" "OutputEnum" NOT NULL, - "validity_input_index_within_epoch" INT NOT NULL, - "validity_output_index_within_input" INT NOT NULL, - "validity_output_hashes_root_hash" BYTEA NOT NULL, - "validity_vouchers_epoch_root_hash" BYTEA NOT NULL, - "validity_notices_epoch_root_hash" BYTEA NOT NULL, - "validity_machine_state_hash" BYTEA NOT NULL, - "validity_output_hash_in_output_hashes_siblings" BYTEA[] NOT NULL, - "validity_output_hashes_in_epoch_siblings" 
BYTEA[] NOT NULL,
-    "context" BYTEA NOT NULL,
-    CONSTRAINT "proofs_pkey" PRIMARY KEY ("input_index", "output_index", "output_enum")
-);
diff --git a/offchain/data/migrations/20230921143147_completion_status/down.sql b/offchain/data/migrations/20230921143147_completion_status/down.sql
deleted file mode 100644
index d392633ef..000000000
--- a/offchain/data/migrations/20230921143147_completion_status/down.sql
+++ /dev/null
@@ -1,5 +0,0 @@
--- This file should undo anything in `up.sql`
-
-ALTER TABLE "inputs" DROP "status";
-
-DROP TYPE "CompletionStatus";
diff --git a/offchain/data/migrations/20230921143147_completion_status/up.sql b/offchain/data/migrations/20230921143147_completion_status/up.sql
deleted file mode 100644
index c4d02f0f6..000000000
--- a/offchain/data/migrations/20230921143147_completion_status/up.sql
+++ /dev/null
@@ -1,14 +0,0 @@
--- Your SQL goes here
-
-CREATE TYPE "CompletionStatus" AS ENUM (
-    'Unprocessed',
-    'Accepted',
-    'Rejected',
-    'Exception',
-    'MachineHalted',
-    'CycleLimitExceeded',
-    'TimeLimitExceeded',
-    'PayloadLengthLimitExceeded'
-);
-
-ALTER TABLE "inputs" ADD "status" "CompletionStatus" NOT NULL DEFAULT 'Unprocessed';
diff --git a/offchain/data/src/config.rs b/offchain/data/src/config.rs
deleted file mode 100644
index e93c514d5..000000000
--- a/offchain/data/src/config.rs
+++ /dev/null
@@ -1,76 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use backoff::{ExponentialBackoff, ExponentialBackoffBuilder};
-use clap::Parser;
-pub use redacted::{RedactedUrl, Url};
-use std::time::Duration;
-
-#[derive(Debug)]
-pub struct RepositoryConfig {
-    pub redacted_endpoint: Option<RedactedUrl>,
-    pub connection_pool_size: u32,
-    pub backoff: ExponentialBackoff,
-}
-
-impl RepositoryConfig {
-    /// Get the string with the endpoint if it is set, otherwise return an empty string
-    pub fn endpoint(&self) -> String {
-        match &self.redacted_endpoint {
-            None => String::from(""),
-            Some(endpoint) => endpoint.inner().to_string(),
-        }
-    }
-}
-
-#[derive(Debug, Parser)]
-pub struct RepositoryCLIConfig {
-    /// Postgres endpoint in the format 'postgres://user:password@hostname:port/database'.
-    ///
-    /// If not set, or set to an empty string, defers the behaviour to the Pg driver.
-    /// See: https://www.postgresql.org/docs/current/libpq-envars.html
-    ///
-    /// It is also possible to set the endpoint without a password and load it from Postgres'
-    /// passfile.
- /// See: https://www.postgresql.org/docs/current/libpq-connect.html#LIBPQ-CONNECT-PASSFILE - #[arg(long, env)] - postgres_endpoint: Option, - - /// Number of connections to the database - #[arg(long, env, default_value_t = 3)] - postgres_connection_pool_size: u32, - - /// Max elapsed time for timeout - #[arg(long, env, default_value = "120000")] - postgres_backoff_max_elapsed_duration: u64, -} - -impl From for RepositoryConfig { - fn from(cli_config: RepositoryCLIConfig) -> RepositoryConfig { - let redacted_endpoint = match cli_config.postgres_endpoint { - None => None, - Some(endpoint) => { - if endpoint.is_empty() { - None - } else { - Some(RedactedUrl::new( - Url::parse(endpoint.as_str()) - .expect("failed to parse Postgres URL"), - )) - } - } - }; - let connection_pool_size = cli_config.postgres_connection_pool_size; - let backoff_max_elapsed_duration = Duration::from_millis( - cli_config.postgres_backoff_max_elapsed_duration, - ); - let backoff = ExponentialBackoffBuilder::new() - .with_max_elapsed_time(Some(backoff_max_elapsed_duration)) - .build(); - RepositoryConfig { - redacted_endpoint, - connection_pool_size, - backoff, - } - } -} diff --git a/offchain/data/src/error.rs b/offchain/data/src/error.rs deleted file mode 100644 index ca009b458..000000000 --- a/offchain/data/src/error.rs +++ /dev/null @@ -1,39 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use snafu::Snafu; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(super)))] -pub enum Error { - #[snafu(display("database pool connection error"))] - DatabaseConnectionError { - source: backoff::Error, - }, - - #[snafu(display("database error"))] - DatabaseError { source: diesel::result::Error }, - - #[snafu(display("{} not found", item_type))] - ItemNotFound { item_type: String }, - - #[snafu(display("failed to decode UTF8 cursor"))] - DecodeUTF8CursorError { source: std::str::Utf8Error }, - - #[snafu(display("failed to decode base64 cursor"))] - DecodeBase64CursorError { source: base64::DecodeError }, - - #[snafu(display("failed to parse cursor"))] - ParseCursorError { source: std::num::ParseIntError }, - - #[snafu(display( - "cannot mix forward pagination (first, after) with backward pagination (last, before)" - ))] - MixedPaginationError {}, - - #[snafu(display("invalid pagination cursor {}", arg))] - PaginationCursorError { arg: String }, - - #[snafu(display("invalid pagination limit {}", arg))] - PaginationLimitError { arg: String }, -} diff --git a/offchain/data/src/lib.rs b/offchain/data/src/lib.rs deleted file mode 100644 index 548414c1f..000000000 --- a/offchain/data/src/lib.rs +++ /dev/null @@ -1,20 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod config; -mod error; -mod migrations; -mod pagination; -mod repository; -mod schema; -mod types; - -pub use config::{RedactedUrl, RepositoryCLIConfig, RepositoryConfig, Url}; -pub use error::Error; -pub use migrations::{run_migrations, MigrationError}; -pub use pagination::{Connection, Cursor, Edge, PageInfo}; -pub use repository::Repository; -pub use types::{ - CompletionStatus, Input, InputQueryFilter, Notice, NoticeQueryFilter, - OutputEnum, Proof, Report, ReportQueryFilter, Voucher, VoucherQueryFilter, -}; diff --git a/offchain/data/src/migrations.rs b/offchain/data/src/migrations.rs deleted file mode 100644 index 36f12de21..000000000 --- a/offchain/data/src/migrations.rs +++ /dev/null @@ -1,35 +0,0 @@ -// (c) Cartesi and 
individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use diesel::{pg::PgConnection, Connection};
-use diesel_migrations::{
-    embed_migrations, EmbeddedMigrations, MigrationHarness,
-};
-use snafu::{ResultExt, Snafu};
-const MIGRATIONS: EmbeddedMigrations = embed_migrations!("migrations");
-
-#[derive(Debug, Snafu)]
-pub enum MigrationError {
-    #[snafu(display("connection error"))]
-    ConnectionError { source: diesel::ConnectionError },
-
-    #[snafu(display("migration error"))]
-    RunMigrationError {
-        source: Box<dyn std::error::Error + Send + Sync>,
-    },
-}
-
-pub fn run_migrations(postgres_endpoint: &str) -> Result<(), MigrationError> {
-    tracing::trace!("running pending migrations");
-
-    let mut connection =
-        PgConnection::establish(postgres_endpoint).context(ConnectionSnafu)?;
-    let migrations = connection
-        .run_pending_migrations(MIGRATIONS)
-        .context(RunMigrationSnafu)?;
-    for migration in migrations.iter() {
-        tracing::trace!("ran migration {}", migration);
-    }
-
-    Ok(())
-}
diff --git a/offchain/data/src/pagination.rs b/offchain/data/src/pagination.rs
deleted file mode 100644
index 444cb2ced..000000000
--- a/offchain/data/src/pagination.rs
+++ /dev/null
@@ -1,628 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use base64::{engine::general_purpose::STANDARD as base64_engine, Engine as _};
-use snafu::ResultExt;
-use std::fmt::Debug;
-
-use super::error::{
-    DecodeBase64CursorSnafu, DecodeUTF8CursorSnafu, Error,
-    MixedPaginationSnafu, PaginationCursorSnafu, PaginationLimitSnafu,
-    ParseCursorSnafu,
-};
-
-const DEFAULT_PAGINATION_LIMIT: i32 = 1000;
-
-macro_rules! ensure_cursor {
-    ($arg: ident, $total_count: expr) => {{
-        let cursor = Cursor::decode(&$arg)?;
-        snafu::ensure!(
-            cursor.offset >= 0 && cursor.offset < $total_count,
-            PaginationCursorSnafu {
-                arg: stringify!($arg),
-            }
-        );
-        cursor.offset
-    }};
-}
-
-macro_rules!
ensure_limit { - ($arg: ident) => { - match $arg { - Some(limit) => { - snafu::ensure!( - limit >= 0, - PaginationLimitSnafu { - arg: stringify!($arg), - } - ); - std::cmp::min(limit, DEFAULT_PAGINATION_LIMIT) - } - None => DEFAULT_PAGINATION_LIMIT, - } - }; -} - -#[derive(Debug, PartialEq)] -pub struct Pagination { - total_count: i32, - offset: i32, - limit: i32, -} - -impl Pagination { - pub fn new( - first: Option, - last: Option, - after: Option, - before: Option, - total_count: i32, - ) -> Result { - let forward = first.is_some() || after.is_some(); - let backward = last.is_some() || before.is_some(); - snafu::ensure!(!forward || !backward, MixedPaginationSnafu); - if backward { - let before_offset = match before { - Some(before) => ensure_cursor!(before, total_count), - None => total_count, - }; - let limit = ensure_limit!(last); - if limit >= before_offset { - Ok(Self { - total_count, - offset: 0, - limit: before_offset, - }) - } else { - Ok(Self { - total_count, - offset: before_offset - limit, - limit, - }) - } - } else { - let offset = match after { - Some(after) => ensure_cursor!(after, total_count) + 1, - None => 0, - }; - let limit = ensure_limit!(first); - if offset + limit > total_count { - Ok(Self { - total_count, - offset, - limit: total_count - offset, - }) - } else { - Ok(Self { - total_count, - offset, - limit, - }) - } - } - } - - pub fn offset(&self) -> i32 { - self.offset - } - - pub fn limit(&self) -> i32 { - self.limit - } - - pub fn create_connection(&self, nodes: Vec) -> Connection { - let mut edges = vec![]; - for (i, node) in nodes.into_iter().enumerate() { - let cursor = Cursor { - offset: self.offset + i as i32, - }; - edges.push(Edge { node, cursor }); - } - let (start_cursor, has_previous_page) = - if let Some(edge) = edges.first() { - (Some(edge.cursor), edge.cursor.offset > 0) - } else { - (None, false) - }; - let (end_cursor, has_next_page) = if let Some(edge) = edges.last() { - (Some(edge.cursor), edge.cursor.offset < self.total_count - 1) - } else { - (None, false) - }; - let page_info = PageInfo { - start_cursor, - end_cursor, - has_next_page, - has_previous_page, - }; - Connection { - total_count: self.total_count, - edges, - page_info, - } - } -} - -#[derive(Debug, Clone, Copy, PartialEq)] -pub struct Cursor { - offset: i32, -} - -impl Cursor { - /// Encode cursor as base64 - pub fn encode(&self) -> String { - base64_engine.encode(self.offset.to_string()) - } - - /// Decode cursor from base64 String - pub fn decode(value: &str) -> Result { - let bytes = base64_engine - .decode(value) - .context(DecodeBase64CursorSnafu)?; - let offset = std::str::from_utf8(&bytes) - .context(DecodeUTF8CursorSnafu)? 
- .parse::() - .context(ParseCursorSnafu)?; - Ok(Cursor { offset }) - } -} - -#[derive(Debug, PartialEq)] -pub struct Connection { - pub total_count: i32, - pub edges: Vec>, - pub page_info: PageInfo, -} - -#[derive(Debug, PartialEq)] -pub struct Edge { - pub node: N, - pub cursor: Cursor, -} - -#[derive(Debug, PartialEq)] -pub struct PageInfo { - pub start_cursor: Option, - pub end_cursor: Option, - pub has_next_page: bool, - pub has_previous_page: bool, -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn it_encodes_cursor() { - assert_eq!(Cursor { offset: 0 }.encode(), "MA=="); - assert_eq!(Cursor { offset: 1 }.encode(), "MQ=="); - assert_eq!(Cursor { offset: 2 }.encode(), "Mg=="); - assert_eq!(Cursor { offset: 1000 }.encode(), "MTAwMA=="); - assert_eq!(Cursor { offset: i32::MAX }.encode(), "MjE0NzQ4MzY0Nw=="); - } - - #[test] - fn it_decodes_cursor() { - assert_eq!(Cursor::decode("MA==").unwrap(), Cursor { offset: 0 }); - assert_eq!(Cursor::decode("MQ==").unwrap(), Cursor { offset: 1 }); - assert_eq!(Cursor::decode("Mg==").unwrap(), Cursor { offset: 2 }); - assert_eq!( - Cursor::decode("MTAwMA==").unwrap(), - Cursor { offset: 1000 } - ); - assert_eq!( - Cursor::decode("MjE0NzQ4MzY0Nw==").unwrap(), - Cursor { offset: i32::MAX } - ); - } - - #[test] - fn it_fails_to_decode_non_base64_cursor() { - assert!(matches!( - Cursor::decode("invalid").unwrap_err(), - Error::DecodeBase64CursorError { .. } - )) - } - - #[test] - fn it_fails_to_decode_invalid_string_cursor() { - assert!(matches!( - Cursor::decode("gA==").unwrap_err(), - Error::DecodeUTF8CursorError { .. } - )); - } - - #[test] - fn it_fails_to_decode_non_integer_cursor() { - assert!(matches!( - Cursor::decode("aW52YWxpZA==").unwrap_err(), - Error::ParseCursorError { .. } - )); - } - - #[test] - fn it_paginates_forward_by_default() { - assert_eq!( - Pagination::new(None, None, None, None, 1).unwrap(), - Pagination { - total_count: 1, - offset: 0, - limit: 1 - } - ); - assert_eq!( - Pagination::new(None, None, None, None, 10).unwrap(), - Pagination { - total_count: 10, - offset: 0, - limit: 10 - } - ); - assert_eq!( - Pagination::new( - None, - None, - None, - None, - DEFAULT_PAGINATION_LIMIT * 10 - ) - .unwrap(), - Pagination { - total_count: DEFAULT_PAGINATION_LIMIT * 10, - offset: 0, - limit: DEFAULT_PAGINATION_LIMIT - } - ); - } - - #[test] - fn it_paginates_with_zero_total_count() { - assert_eq!( - Pagination::new(Some(10), None, None, None, 0).unwrap(), - Pagination { - total_count: 0, - offset: 0, - limit: 0 - } - ); - assert_eq!( - Pagination::new(None, Some(10), None, None, 0).unwrap(), - Pagination { - total_count: 0, - offset: 0, - limit: 0 - } - ); - } - - #[test] - fn it_paginates_forward_with_bounded_limit() { - assert_eq!( - Pagination::new(Some(5), None, None, None, 10).unwrap(), - Pagination { - total_count: 10, - offset: 0, - limit: 5 - } - ); - let cursor = Cursor { offset: 2 }.encode(); - assert_eq!( - Pagination::new(Some(5), None, Some(cursor), None, 10).unwrap(), - Pagination { - total_count: 10, - offset: 3, - limit: 5 - } - ); - let cursor = Cursor { offset: 4 }.encode(); - assert_eq!( - Pagination::new(Some(5), None, Some(cursor), None, 10).unwrap(), - Pagination { - total_count: 10, - offset: 5, - limit: 5 - } - ); - } - - #[test] - fn it_paginates_forward_with_out_of_bounds_limit() { - let cursor = Cursor { offset: 7 }.encode(); - assert_eq!( - Pagination::new(Some(5), None, Some(cursor), None, 10).unwrap(), - Pagination { - total_count: 10, - offset: 8, - limit: 2 - } - ); - let cursor = Cursor 
{ offset: 9 }.encode(); - assert_eq!( - Pagination::new(Some(5), None, Some(cursor), None, 10).unwrap(), - Pagination { - total_count: 10, - offset: 10, - limit: 0 - } - ); - } - - #[test] - fn it_paginates_backward_with_bounded_limit() { - assert_eq!( - Pagination::new(None, Some(5), None, None, 10).unwrap(), - Pagination { - total_count: 10, - offset: 5, - limit: 5 - } - ); - let cursor = Cursor { offset: 7 }.encode(); - assert_eq!( - Pagination::new(None, Some(5), None, Some(cursor), 10).unwrap(), - Pagination { - total_count: 10, - offset: 2, - limit: 5 - } - ); - let cursor = Cursor { offset: 5 }.encode(); - assert_eq!( - Pagination::new(None, Some(5), None, Some(cursor), 10).unwrap(), - Pagination { - total_count: 10, - offset: 0, - limit: 5 - } - ); - } - - #[test] - fn it_paginates_backward_with_out_of_bounds_limit() { - let cursor = Cursor { offset: 3 }.encode(); - assert_eq!( - Pagination::new(None, Some(5), None, Some(cursor), 10).unwrap(), - Pagination { - total_count: 10, - offset: 0, - limit: 3 - } - ); - let cursor = Cursor { offset: 0 }.encode(); - assert_eq!( - Pagination::new(None, Some(5), None, Some(cursor), 10).unwrap(), - Pagination { - total_count: 10, - offset: 0, - limit: 0 - } - ); - } - - #[test] - fn it_fails_to_paginate_when_mixing_backward_and_forward_args() { - let cursor = Cursor { offset: 0 }.encode(); - assert!(matches!( - Pagination::new(Some(1), Some(1), None, None, 1).unwrap_err(), - Error::MixedPaginationError {} - )); - assert!(matches!( - Pagination::new(None, Some(1), Some(cursor.clone()), None, 1) - .unwrap_err(), - Error::MixedPaginationError {} - )); - assert!(matches!( - Pagination::new(Some(1), None, None, Some(cursor.clone()), 1) - .unwrap_err(), - Error::MixedPaginationError {} - )); - assert!(matches!( - Pagination::new( - None, - None, - Some(cursor.clone()), - Some(cursor.clone()), - 1 - ) - .unwrap_err(), - Error::MixedPaginationError {} - )); - } - - #[test] - fn it_fails_to_paginate_when_limit_is_negative() { - assert!(matches!( - Pagination::new(Some(-1), None, None, None, 10).unwrap_err(), - Error::PaginationLimitError { arg } if arg == "first" - )); - assert!(matches!( - Pagination::new(None, Some(-1), None, None, 10).unwrap_err(), - Error::PaginationLimitError { arg } if arg == "last" - )); - } - - #[test] - fn it_fails_to_paginate_with_invalid_cursor() { - assert!(matches!( - Pagination::new(None, None, Some("invalid".to_owned()), None, 10) - .unwrap_err(), - Error::DecodeBase64CursorError { .. } - )); - assert!(matches!( - Pagination::new(None, None, None, Some("invalid".to_owned()), 10) - .unwrap_err(), - Error::DecodeBase64CursorError { .. 
} - )); - } - - #[test] - fn it_fails_to_paginate_with_cursor_out_of_range() { - let cursor = Cursor { offset: 10 }.encode(); - assert!(matches!( - Pagination::new(None, None, Some(cursor.clone()), None, 10) - .unwrap_err(), - Error::PaginationCursorError { arg } if arg == "after" - )); - assert!(matches!( - Pagination::new(None, None, None, Some(cursor.clone()), 10) - .unwrap_err(), - Error::PaginationCursorError { arg } if arg == "before" - )); - } - - #[test] - fn it_creates_connection_without_nodes() { - let pagination = Pagination { - total_count: 3, - offset: 0, - limit: 0, - }; - let connection = pagination.create_connection::(vec![]); - assert_eq!( - connection, - Connection { - total_count: 3, - edges: vec![], - page_info: PageInfo { - start_cursor: None, - end_cursor: None, - has_next_page: false, - has_previous_page: false, - } - } - ); - } - - #[test] - fn it_creates_connection_with_all_nodes() { - let pagination = Pagination { - total_count: 3, - offset: 0, - limit: 3, - }; - let connection = pagination.create_connection::(vec![ - "0".to_owned(), - "1".to_owned(), - "2".to_owned(), - ]); - assert_eq!( - connection, - Connection { - total_count: 3, - edges: vec![ - Edge { - node: "0".to_owned(), - cursor: Cursor { offset: 0 }, - }, - Edge { - node: "1".to_owned(), - cursor: Cursor { offset: 1 }, - }, - Edge { - node: "2".to_owned(), - cursor: Cursor { offset: 2 }, - }, - ], - page_info: PageInfo { - start_cursor: Some(Cursor { offset: 0 }), - end_cursor: Some(Cursor { offset: 2 }), - has_next_page: false, - has_previous_page: false, - } - } - ); - } - - #[test] - fn it_creates_connection_on_first_page() { - let pagination = Pagination { - total_count: 3, - offset: 0, - limit: 2, - }; - let connection = pagination - .create_connection::(vec!["0".to_owned(), "1".to_owned()]); - assert_eq!( - connection, - Connection { - total_count: 3, - edges: vec![ - Edge { - node: "0".to_owned(), - cursor: Cursor { offset: 0 }, - }, - Edge { - node: "1".to_owned(), - cursor: Cursor { offset: 1 }, - }, - ], - page_info: PageInfo { - start_cursor: Some(Cursor { offset: 0 }), - end_cursor: Some(Cursor { offset: 1 }), - has_next_page: true, - has_previous_page: false, - } - } - ); - } - - #[test] - fn it_creates_connection_on_last_page() { - let pagination = Pagination { - total_count: 3, - offset: 1, - limit: 2, - }; - let connection = pagination - .create_connection::(vec!["1".to_owned(), "2".to_owned()]); - assert_eq!( - connection, - Connection { - total_count: 3, - edges: vec![ - Edge { - node: "1".to_owned(), - cursor: Cursor { offset: 1 }, - }, - Edge { - node: "2".to_owned(), - cursor: Cursor { offset: 2 }, - }, - ], - page_info: PageInfo { - start_cursor: Some(Cursor { offset: 1 }), - end_cursor: Some(Cursor { offset: 2 }), - has_next_page: false, - has_previous_page: true, - } - } - ); - } - - #[test] - fn it_creates_connection_on_middle_page() { - let pagination = Pagination { - total_count: 3, - offset: 1, - limit: 1, - }; - let connection = - pagination.create_connection::(vec!["1".to_owned()]); - assert_eq!( - connection, - Connection { - total_count: 3, - edges: vec![Edge { - node: "1".to_owned(), - cursor: Cursor { offset: 1 }, - },], - page_info: PageInfo { - start_cursor: Some(Cursor { offset: 1 }), - end_cursor: Some(Cursor { offset: 1 }), - has_next_page: true, - has_previous_page: true, - } - } - ); - } -} diff --git a/offchain/data/src/repository.rs b/offchain/data/src/repository.rs deleted file mode 100644 index f65b67b58..000000000 --- a/offchain/data/src/repository.rs +++ 
/dev/null @@ -1,318 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use backoff::ExponentialBackoff; -use diesel::pg::{Pg, PgConnection}; -use diesel::r2d2::{ConnectionManager, Pool, PooledConnection}; -use diesel::{insert_into, prelude::*, update}; -use snafu::ResultExt; -use std::sync::Arc; - -use super::config::RepositoryConfig; -use super::error::{DatabaseConnectionSnafu, DatabaseSnafu, Error}; -use super::pagination::{Connection, Pagination}; -use super::schema; -use super::types::{ - CompletionStatus, Input, InputQueryFilter, Notice, NoticeQueryFilter, - OutputEnum, Proof, Report, ReportQueryFilter, Voucher, VoucherQueryFilter, -}; - -pub const POOL_CONNECTION_SIZE: u32 = 3; - -#[derive(Clone, Debug)] -pub struct Repository { - // Connection is not thread safe to share between threads, we use connection pool - db_pool: Arc>>, - backoff: ExponentialBackoff, -} - -impl Repository { - /// Create database connection pool, wait until database server is available with backoff strategy - pub fn new(config: RepositoryConfig) -> Result { - let db_pool = backoff::retry(config.backoff.clone(), || { - tracing::info!(?config, "trying to create db pool for database"); - Pool::builder() - .max_size(POOL_CONNECTION_SIZE) - .build(ConnectionManager::::new( - config.endpoint(), - )) - .map_err(backoff::Error::transient) - }) - .context(DatabaseConnectionSnafu)?; - Ok(Self { - db_pool: Arc::new(db_pool), - backoff: config.backoff, - }) - } - - /// Obtain the connection from the connection pool - fn conn( - &self, - ) -> Result>, Error> { - backoff::retry(self.backoff.clone(), || { - self.db_pool.get().map_err(backoff::Error::transient) - }) - .context(DatabaseConnectionSnafu) - } -} - -/// Basic queries that fetch by primary_key -impl Repository { - pub fn get_input(&self, index: i32) -> Result { - use schema::inputs::dsl; - let mut conn = self.conn()?; - dsl::inputs - .filter(dsl::index.eq(index)) - .load::(&mut conn) - .context(DatabaseSnafu)? - .pop() - .ok_or(Error::ItemNotFound { - item_type: "input".to_owned(), - }) - } - - pub fn get_voucher( - &self, - index: i32, - input_index: i32, - ) -> Result { - let mut conn = self.conn()?; - use schema::vouchers::dsl; - dsl::vouchers - .filter(dsl::index.eq(index)) - .filter(dsl::input_index.eq(input_index)) - .load::(&mut conn) - .context(DatabaseSnafu)? - .pop() - .ok_or(Error::ItemNotFound { - item_type: "voucher".to_owned(), - }) - } - - pub fn get_notice( - &self, - index: i32, - input_index: i32, - ) -> Result { - use schema::notices::dsl; - let mut conn = self.conn()?; - dsl::notices - .filter(dsl::index.eq(index)) - .filter(dsl::input_index.eq(input_index)) - .load::(&mut conn) - .context(DatabaseSnafu)? - .pop() - .ok_or(Error::ItemNotFound { - item_type: "notice".to_owned(), - }) - } - - pub fn get_report( - &self, - index: i32, - input_index: i32, - ) -> Result { - use schema::reports::dsl; - let mut conn = self.conn()?; - dsl::reports - .filter(dsl::index.eq(index)) - .filter(dsl::input_index.eq(input_index)) - .load::(&mut conn) - .context(DatabaseSnafu)? 
- .pop() - .ok_or(Error::ItemNotFound { - item_type: "report".to_owned(), - }) - } - - pub fn get_proof( - &self, - input_index: i32, - output_index: i32, - output_enum: OutputEnum, - ) -> Result, Error> { - use schema::proofs::dsl; - let mut conn = self.conn()?; - dsl::proofs - .filter(dsl::input_index.eq(input_index)) - .filter(dsl::output_index.eq(output_index)) - .filter(dsl::output_enum.eq(output_enum)) - .load::(&mut conn) - .map(|mut proofs| proofs.pop()) - .context(DatabaseSnafu) - } -} - -/// Basic queries to insert rollups' outputs -impl Repository { - pub fn insert_input(&self, input: Input) -> Result<(), Error> { - use schema::inputs; - let mut conn = self.conn()?; - insert_into(inputs::table) - .values(&input) - .on_conflict_do_nothing() - .execute(&mut conn) - .context(DatabaseSnafu)?; - tracing::trace!("Input {} was written to the db", input.index); - Ok(()) - } - - pub fn insert_notice(&self, notice: Notice) -> Result<(), Error> { - use schema::notices; - let mut conn = self.conn()?; - insert_into(notices::table) - .values(¬ice) - .on_conflict_do_nothing() - .execute(&mut conn) - .context(DatabaseSnafu)?; - tracing::trace!( - "Notice {} from Input {} was written to the db", - notice.index, - notice.input_index - ); - Ok(()) - } - - pub fn insert_voucher(&self, voucher: Voucher) -> Result<(), Error> { - use schema::vouchers; - let mut conn = self.conn()?; - insert_into(vouchers::table) - .values(&voucher) - .on_conflict_do_nothing() - .execute(&mut conn) - .context(DatabaseSnafu)?; - tracing::trace!( - "Voucher {} from Input {} was written to the db", - voucher.index, - voucher.input_index - ); - Ok(()) - } - - pub fn insert_report(&self, report: Report) -> Result<(), Error> { - use schema::reports; - let mut conn = self.conn()?; - insert_into(reports::table) - .values(&report) - .on_conflict_do_nothing() - .execute(&mut conn) - .context(DatabaseSnafu)?; - tracing::trace!( - "Report {} from Input {} was written to the db", - report.index, - report.input_index - ); - Ok(()) - } - - pub fn insert_proof(&self, proof: Proof) -> Result<(), Error> { - use schema::proofs; - let mut conn = self.conn()?; - insert_into(proofs::table) - .values(&proof) - .on_conflict_do_nothing() - .execute(&mut conn) - .context(DatabaseSnafu)?; - tracing::trace!( - "Proof for {:?} {} of Input {} was written to the db", - proof.output_enum, - proof.output_index, - proof.input_index - ); - Ok(()) - } -} - -/// Update operations -impl Repository { - pub fn update_input_status( - &self, - input_index: i32, - status: CompletionStatus, - ) -> Result<(), Error> { - use schema::inputs; - let mut conn = self.conn()?; - update(inputs::table) - .filter(inputs::dsl::index.eq(input_index)) - .set(inputs::status.eq(status)) - .execute(&mut conn) - .context(DatabaseSnafu)?; - tracing::trace!("Set {:?} status to input {}", status, input_index); - Ok(()) - } -} - -/// Generate a boxed query from an input query filter -impl InputQueryFilter { - fn to_query(&self) -> schema::inputs::BoxedQuery<'_, Pg> { - use schema::inputs::dsl; - let mut query = dsl::inputs.into_boxed(); - if let Some(other) = self.index_greater_than { - query = query.filter(dsl::index.gt(other)); - } - if let Some(other) = self.index_lower_than { - query = query.filter(dsl::index.lt(other)); - } - query - } -} - -/// Generate a boxed query from an output query filter -macro_rules! 
impl_output_filter_to_query { - ($filter: ty, $table: ident) => { - impl $filter { - fn to_query(&self) -> schema::$table::BoxedQuery<'_, Pg> { - use schema::$table::dsl; - let mut query = dsl::$table.into_boxed(); - if let Some(other) = self.input_index { - query = query.filter(dsl::input_index.eq(other)); - } - query - } - } - }; -} - -impl_output_filter_to_query!(VoucherQueryFilter, vouchers); -impl_output_filter_to_query!(NoticeQueryFilter, notices); -impl_output_filter_to_query!(ReportQueryFilter, reports); - -/// Implement a paginated query for the given table -macro_rules! impl_paginated_query { - ($query: ident, $table: ident, $node: ty, $filter: ty) => { - impl Repository { - pub fn $query( - &self, - first: Option, - last: Option, - after: Option, - before: Option, - filter: $filter, - ) -> Result, Error> { - let mut conn = self.conn()?; - let query = filter.to_query().count(); - let count = query - .get_result::(&mut conn) - .context(DatabaseSnafu)?; - let pagination = - Pagination::new(first, last, after, before, count as i32)?; - let nodes = if pagination.limit() > 0 { - let query = filter - .to_query() - .limit(pagination.limit().into()) - .offset(pagination.offset().into()) - .order(schema::$table::dsl::$table.primary_key()); - query.load(&mut conn).context(DatabaseSnafu)? - } else { - vec![] - }; - Ok(pagination.create_connection(nodes)) - } - } - }; -} - -impl_paginated_query!(get_inputs, inputs, Input, InputQueryFilter); -impl_paginated_query!(get_vouchers, vouchers, Voucher, VoucherQueryFilter); -impl_paginated_query!(get_notices, notices, Notice, NoticeQueryFilter); -impl_paginated_query!(get_reports, reports, Report, ReportQueryFilter); diff --git a/offchain/data/src/schema.rs b/offchain/data/src/schema.rs deleted file mode 100644 index 69e188ca5..000000000 --- a/offchain/data/src/schema.rs +++ /dev/null @@ -1,79 +0,0 @@ -// @generated automatically by Diesel CLI. - -pub mod sql_types { - #[derive(diesel::sql_types::SqlType)] - #[diesel(postgres_type(name = "CompletionStatus"))] - pub struct CompletionStatus; - - #[derive(diesel::sql_types::SqlType)] - #[diesel(postgres_type(name = "OutputEnum"))] - pub struct OutputEnum; -} - -diesel::table! { - use diesel::sql_types::*; - use super::sql_types::CompletionStatus; - - inputs (index) { - index -> Int4, - msg_sender -> Bytea, - tx_hash -> Bytea, - block_number -> Int8, - timestamp -> Timestamp, - payload -> Bytea, - status -> CompletionStatus, - } -} - -diesel::table! { - notices (input_index, index) { - input_index -> Int4, - index -> Int4, - payload -> Bytea, - } -} - -diesel::table! { - use diesel::sql_types::*; - use super::sql_types::OutputEnum; - - proofs (input_index, output_index, output_enum) { - input_index -> Int4, - output_index -> Int4, - output_enum -> OutputEnum, - validity_input_index_within_epoch -> Int4, - validity_output_index_within_input -> Int4, - validity_output_hashes_root_hash -> Bytea, - validity_vouchers_epoch_root_hash -> Bytea, - validity_notices_epoch_root_hash -> Bytea, - validity_machine_state_hash -> Bytea, - validity_output_hash_in_output_hashes_siblings -> Array>, - validity_output_hashes_in_epoch_siblings -> Array>, - context -> Bytea, - } -} - -diesel::table! { - reports (input_index, index) { - input_index -> Int4, - index -> Int4, - payload -> Bytea, - } -} - -diesel::table! 
{ - vouchers (input_index, index) { - input_index -> Int4, - index -> Int4, - destination -> Bytea, - payload -> Bytea, - } -} - -diesel::joinable!(notices -> inputs (input_index)); -diesel::joinable!(reports -> inputs (input_index)); -diesel::joinable!(vouchers -> inputs (input_index)); - -diesel::allow_tables_to_appear_in_same_query!( - inputs, notices, proofs, reports, vouchers, -); diff --git a/offchain/data/src/types.rs b/offchain/data/src/types.rs deleted file mode 100644 index 40d9187c0..000000000 --- a/offchain/data/src/types.rs +++ /dev/null @@ -1,173 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use diesel::deserialize::{self, FromSql, FromSqlRow}; -use diesel::pg::{Pg, PgValue}; -use diesel::serialize::{self, IsNull, Output, ToSql}; -use diesel::{AsExpression, Insertable, Queryable, QueryableByName}; -use std::io::Write; - -use super::schema::{ - inputs, notices, proofs, reports, - sql_types::CompletionStatus as SQLCompletionStatus, - sql_types::OutputEnum as SQLOutputEnum, vouchers, -}; - -#[derive(Debug, PartialEq, Eq, Clone, Copy, FromSqlRow, AsExpression)] -#[diesel(sql_type = SQLCompletionStatus)] -pub enum CompletionStatus { - Unprocessed, - Accepted, - Rejected, - Exception, - MachineHalted, - CycleLimitExceeded, - TimeLimitExceeded, - PayloadLengthLimitExceeded, -} - -impl ToSql for CompletionStatus { - fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { - match *self { - CompletionStatus::Unprocessed => out.write_all(b"Unprocessed")?, - CompletionStatus::Accepted => out.write_all(b"Accepted")?, - CompletionStatus::Rejected => out.write_all(b"Rejected")?, - CompletionStatus::Exception => out.write_all(b"Exception")?, - CompletionStatus::MachineHalted => { - out.write_all(b"MachineHalted")? - } - CompletionStatus::CycleLimitExceeded => { - out.write_all(b"CycleLimitExceeded")? - } - CompletionStatus::TimeLimitExceeded => { - out.write_all(b"TimeLimitExceeded")? - } - CompletionStatus::PayloadLengthLimitExceeded => { - out.write_all(b"PayloadLengthLimitExceeded")? 
- } - } - Ok(IsNull::No) - } -} - -impl FromSql for CompletionStatus { - fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { - match bytes.as_bytes() { - b"Unprocessed" => Ok(CompletionStatus::Unprocessed), - b"Accepted" => Ok(CompletionStatus::Accepted), - b"Rejected" => Ok(CompletionStatus::Rejected), - b"Exception" => Ok(CompletionStatus::Exception), - b"MachineHalted" => Ok(CompletionStatus::MachineHalted), - b"CycleLimitExceeded" => Ok(CompletionStatus::CycleLimitExceeded), - b"TimeLimitExceeded" => Ok(CompletionStatus::TimeLimitExceeded), - b"PayloadLengthLimitExceeded" => { - Ok(CompletionStatus::PayloadLengthLimitExceeded) - } - _ => Err("Unrecognized enum variant".into()), - } - } -} - -#[derive(Clone, Debug, Insertable, PartialEq, Queryable, QueryableByName)] -#[diesel(table_name = inputs)] -pub struct Input { - pub index: i32, - pub msg_sender: Vec, - pub tx_hash: Vec, - pub block_number: i64, - pub timestamp: std::time::SystemTime, - pub payload: Vec, - pub status: CompletionStatus, -} - -#[derive(Clone, Debug, Insertable, PartialEq, Queryable, QueryableByName)] -#[diesel(table_name = notices)] -pub struct Notice { - pub input_index: i32, - pub index: i32, - pub payload: Vec, -} - -#[derive(Clone, Debug, Insertable, PartialEq, Queryable, QueryableByName)] -#[diesel(table_name = vouchers)] -pub struct Voucher { - pub input_index: i32, - pub index: i32, - pub destination: Vec, - pub payload: Vec, -} - -#[derive(Clone, Debug, Insertable, PartialEq, Queryable, QueryableByName)] -#[diesel(table_name = reports)] -pub struct Report { - pub input_index: i32, - pub index: i32, - pub payload: Vec, -} - -#[derive(Debug, PartialEq, Eq, Clone, Copy, FromSqlRow, AsExpression)] -#[diesel(sql_type = SQLOutputEnum)] -pub enum OutputEnum { - Voucher, - Notice, -} - -impl ToSql for OutputEnum { - fn to_sql<'b>(&'b self, out: &mut Output<'b, '_, Pg>) -> serialize::Result { - match *self { - OutputEnum::Voucher => out.write_all(b"voucher")?, - OutputEnum::Notice => out.write_all(b"notice")?, - } - Ok(IsNull::No) - } -} - -impl FromSql for OutputEnum { - fn from_sql(bytes: PgValue<'_>) -> deserialize::Result { - match bytes.as_bytes() { - b"voucher" => Ok(OutputEnum::Voucher), - b"notice" => Ok(OutputEnum::Notice), - _ => Err("Unrecognized enum variant".into()), - } - } -} - -impl diesel::query_builder::QueryId for SQLOutputEnum { - type QueryId = SQLOutputEnum; -} - -#[derive(Clone, Debug, Insertable, PartialEq, Queryable, QueryableByName)] -#[diesel(table_name = proofs)] -pub struct Proof { - pub input_index: i32, - pub output_index: i32, - pub output_enum: OutputEnum, - pub validity_input_index_within_epoch: i32, - pub validity_output_index_within_input: i32, - pub validity_output_hashes_root_hash: Vec, - pub validity_vouchers_epoch_root_hash: Vec, - pub validity_notices_epoch_root_hash: Vec, - pub validity_machine_state_hash: Vec, - pub validity_output_hash_in_output_hashes_siblings: Vec>>, - pub validity_output_hashes_in_epoch_siblings: Vec>>, - pub context: Vec, -} - -#[derive(Debug, Default)] -pub struct InputQueryFilter { - pub index_greater_than: Option, - pub index_lower_than: Option, -} - -macro_rules! 
decl_output_filter { - ($name: ident) => { - #[derive(Debug, Default)] - pub struct $name { - pub input_index: Option, - } - }; -} - -decl_output_filter!(VoucherQueryFilter); -decl_output_filter!(NoticeQueryFilter); -decl_output_filter!(ReportQueryFilter); diff --git a/offchain/data/tests/migrations.rs b/offchain/data/tests/migrations.rs deleted file mode 100644 index 10a93d0cf..000000000 --- a/offchain/data/tests/migrations.rs +++ /dev/null @@ -1,53 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use diesel::{pg::PgConnection, prelude::*, sql_query}; -use testcontainers::{clients::Cli, images::postgres::Postgres, RunnableImage}; - -const POSTGRES_PASSWORD: &'static str = "pw"; - -fn postgres_endpoint(port: u16) -> String { - format!( - "postgres://postgres:{}@localhost:{}/postgres", - POSTGRES_PASSWORD, port - ) -} - -table! { - pg_tables (tablename) { - tablename -> VarChar, - } -} - -#[derive(Debug, QueryableByName)] -#[diesel(table_name = pg_tables)] -pub struct PgTable { - pub tablename: String, -} - -#[test_log::test(test)] -fn run_migrations() { - tracing::info!("setting up Postgres container"); - let docker = Cli::default(); - let image = RunnableImage::from(Postgres::default()) - .with_tag("13") - .with_env_var(("POSTGRES_PASSWORD", POSTGRES_PASSWORD)); - let postgres = docker.run(image); - let endpoint = postgres_endpoint(postgres.get_host_port_ipv4(5432)); - - tracing::info!("running migrations"); - rollups_data::run_migrations(&endpoint).expect("failed to run migrations"); - - tracing::info!("checking whether migrations run in DB"); - let mut connection = PgConnection::establish(&endpoint) - .expect("failed to establish connection"); - let tables = sql_query("SELECT tablename FROM pg_tables;") - .load::(&mut connection) - .expect("failed to run query"); - - let expected_tables = - vec!["inputs", "vouchers", "notices", "reports", "proofs"]; - for expected in expected_tables { - assert!(tables.iter().find(|t| t.tablename == expected).is_some()); - } -} diff --git a/offchain/data/tests/repository.rs b/offchain/data/tests/repository.rs deleted file mode 100644 index 8b406ccc4..000000000 --- a/offchain/data/tests/repository.rs +++ /dev/null @@ -1,726 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use backoff::ExponentialBackoffBuilder; -use diesel::pg::Pg; -use diesel::{ - sql_query, Connection, PgConnection, QueryableByName, RunQueryDsl, -}; -use rollups_data::Connection as PaginationConnection; -use rollups_data::{ - CompletionStatus, Cursor, Edge, Error, Input, InputQueryFilter, Notice, - PageInfo, Proof, RedactedUrl, Report, Repository, RepositoryConfig, Url, - Voucher, -}; -use serial_test::serial; -use std::io::Write; -use std::os::unix::fs::PermissionsExt; -use std::time::{Duration, UNIX_EPOCH}; -use test_fixtures::DataFixture; -use test_log::test; -use testcontainers::clients::Cli; - -const BACKOFF_DURATION: u64 = 120000; - -struct TestState<'d> { - data: DataFixture<'d>, -} - -impl TestState<'_> { - fn setup(docker: &Cli) -> TestState<'_> { - let data = DataFixture::setup(docker); - TestState { data } - } - - pub fn get_repository(&self) -> Repository { - let backoff = ExponentialBackoffBuilder::new() - .with_max_elapsed_time(Some(Duration::from_millis( - BACKOFF_DURATION, - ))) - .build(); - - let redacted_endpoint = Some(RedactedUrl::new( - Url::parse(&format!( - "postgres://{}:{}@{}:{}/{}", - self.data.user, - self.data.password, - 
self.data.hostname, - self.data.port, - self.data.db, - )) - .expect("failed to generate Postgres endpoint"), - )); - Repository::new(RepositoryConfig { - redacted_endpoint, - connection_pool_size: 3, - backoff, - }) - .expect("Repository should have connected successfully") - } - - pub fn get_from_sql + 'static>( - &self, - query: &str, - ) -> T { - let mut conn = PgConnection::establish(&self.data.endpoint) - .expect("failed to connect to db"); - sql_query(query) - .load::(&mut conn) - .expect("failed to run query") - .pop() - .expect("query returned no results") - } -} - -pub fn insert_test_input(repo: &Repository) { - let input = Input { - index: 0, - msg_sender: "msg-sender".as_bytes().to_vec(), - tx_hash: "tx-hash".as_bytes().to_vec(), - block_number: 0, - timestamp: UNIX_EPOCH + Duration::from_secs(1676489717), - payload: "input-0".as_bytes().to_vec(), - status: CompletionStatus::Accepted, - }; - - repo.insert_input(input) - .expect("The input should've been inserted") -} - -pub fn create_input() -> Input { - Input { - index: 0, - msg_sender: "msg-sender".as_bytes().to_vec(), - tx_hash: "tx-hash".as_bytes().to_vec(), - block_number: 0, - timestamp: UNIX_EPOCH + Duration::from_secs(1676489717), - payload: "input-0".as_bytes().to_vec(), - status: CompletionStatus::Accepted, - } -} - -#[test] -#[serial] -fn test_create_repository() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - - // Since we create the repository for every test, we created an auxiliary function - // to do so, and to avoid code duplication, we are calling this function here - test.get_repository(); -} - -#[test] -#[serial] -fn test_fail_to_create_repository() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - - let backoff = ExponentialBackoffBuilder::new() - .with_max_elapsed_time(Some(Duration::from_millis(2000))) - .build(); - - let redacted_endpoint = Some(RedactedUrl::new( - Url::parse(&format!( - "postgres://{}:{}@{}:{}/{}", - "Err", - test.data.password, - test.data.hostname, - test.data.port, - test.data.db, - )) - .expect("failed to generate Postgres endpoint"), - )); - let err = Repository::new(RepositoryConfig { - redacted_endpoint, - connection_pool_size: 3, - backoff, - }) - .expect_err("Repository::new should fail"); - - assert!(matches!(err, Error::DatabaseConnectionError { source: _ })); -} - -#[test] -#[serial] -fn test_postgres_file_configuration() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - - // Create Postgres pgpass file - let tempdir = tempfile::tempdir().expect("failed to create tempdir"); - let pgpass_path = - tempdir.path().join("pgpass").to_string_lossy().to_string(); - tracing::info!("Storing pgpass to {}", pgpass_path); - { - let mut pgpass = std::fs::File::create(&pgpass_path) - .expect("failed to create pgpass"); - // Set permission to 600 - let metadata = - pgpass.metadata().expect("failed to get pgpass metadata"); - let mut permissions = metadata.permissions(); - permissions.set_mode(0o600); - pgpass - .set_permissions(permissions) - .expect("failed to set pgpass permissions"); - // Write pgpass contents - write!( - &mut pgpass, - "{}:{}:{}:{}:{}\n", - test.data.hostname, - test.data.port, - test.data.db, - test.data.user, - test.data.password - ) - .expect("failed to write pgpass"); - } - - // Set Postgres environment variables - std::env::set_var("PGPASSFILE", pgpass_path); - std::env::set_var("PGHOST", test.data.hostname); - std::env::set_var("PGPORT", test.data.port.to_string()); - 
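    // The variables set here let libpq resolve the connection on its own:
    // with `redacted_endpoint: None` below, the repository hands diesel an
    // empty endpoint string, so libpq falls back to PGHOST/PGPORT/PGDATABASE/
    // PGUSER and reads the password from the PGPASSFILE written above.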
std::env::set_var("PGDATABASE", test.data.db); - std::env::set_var("PGUSER", test.data.user); - - // Connect to postgres using pgpass file - let backoff = ExponentialBackoffBuilder::new() - .with_max_elapsed_time(Some(Duration::from_millis(100))) - .build(); - Repository::new(RepositoryConfig { - redacted_endpoint: None, - connection_pool_size: 3, - backoff, - }) - .expect("failed to create repository"); -} - -#[test] -#[serial] -fn test_insert_input() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let input = create_input(); - - repo.insert_input(input.clone()) - .expect("Failed to insert input"); - - let result: Input = test.get_from_sql("Select * from inputs"); - - assert_eq!(result, input); -} - -#[test] -#[serial] -fn test_get_input() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let input = create_input(); - - repo.insert_input(input.clone()) - .expect("Failed to insert input"); - - let get_input = repo.get_input(0).expect("Failed to get input"); - - assert_eq!(input, get_input); -} - -#[test] -#[serial] -fn test_get_input_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let input = create_input(); - - repo.insert_input(input.clone()) - .expect("Failed to insert input"); - - let input_error = repo.get_input(1).expect_err("Get input should fail"); - - assert!(matches!( - input_error, - Error::ItemNotFound { item_type } if item_type == "input" - )); -} - -#[test] -#[serial] -fn test_update_input_status() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let mut input = create_input(); - input.status = CompletionStatus::Unprocessed; - - repo.insert_input(input.clone()) - .expect("Failed to insert input"); - repo.update_input_status(0, CompletionStatus::Accepted) - .expect("Failed to update input status"); - - let get_input = repo.get_input(0).expect("Failed to get input"); - - assert_eq!(get_input.status, CompletionStatus::Accepted); -} - -#[test] -#[serial] -fn test_insert_notice() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let notice = Notice { - input_index: 0, - index: 0, - payload: "notice-0-0".as_bytes().to_vec(), - }; - - repo.insert_notice(notice.clone()) - .expect("Failed to insert notice"); - - let result: Notice = test.get_from_sql("Select * from notices"); - - assert_eq!(result, notice); -} - -#[test] -#[serial] -fn test_get_notice() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let notice = Notice { - input_index: 0, - index: 0, - payload: "notice-0-0".as_bytes().to_vec(), - }; - - repo.insert_notice(notice.clone()) - .expect("Failed to insert notice"); - - let get_notice = repo - .get_notice(0, 0) - .expect("Get notice should have returned a value"); - - assert_eq!(notice, get_notice); -} - -#[test] -#[serial] -fn test_insert_notice_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let notice = Notice { - input_index: 1, - index: 0, - payload: "notice-0-0".as_bytes().to_vec(), - }; - let notice_error = repo - .insert_notice(notice.clone()) - .expect_err("Insert notice should fail"); - - assert!(matches!(notice_error, 
Error::DatabaseError { source: _ })); -} - -#[test] -#[serial] -fn test_get_notice_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let notice = Notice { - input_index: 0, - index: 0, - payload: "notice-0-0".as_bytes().to_vec(), - }; - repo.insert_notice(notice.clone()) - .expect("Insert notice should succeed"); - - let notice_error = - repo.get_notice(1, 1).expect_err("Get notice should fail"); - - assert!(matches!( - notice_error, - Error::ItemNotFound { item_type } if item_type == "notice" - )); -} - -#[test] -#[serial] -fn test_insert_voucher() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let voucher = Voucher { - input_index: 0, - index: 0, - destination: "destination".as_bytes().to_vec(), - payload: "voucher-0-0".as_bytes().to_vec(), - }; - - repo.insert_voucher(voucher.clone()) - .expect("Insert voucher should succeed"); - - let result: Voucher = test.get_from_sql("Select * from vouchers"); - - assert_eq!(result, voucher); -} - -#[test] -#[serial] -fn test_get_voucher() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let voucher = Voucher { - input_index: 0, - index: 0, - destination: "destination".as_bytes().to_vec(), - payload: "voucher-0-0".as_bytes().to_vec(), - }; - repo.insert_voucher(voucher.clone()) - .expect("Insert voucher should succeed"); - - let get_voucher = - repo.get_voucher(0, 0).expect("Get voucher should succeed"); - - assert_eq!(voucher, get_voucher); -} - -#[test] -#[serial] -fn test_insert_voucher_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let voucher = Voucher { - input_index: 1, - index: 0, - destination: "destination".as_bytes().to_vec(), - payload: "voucher-1-0".as_bytes().to_vec(), - }; - let voucher_error = repo - .insert_voucher(voucher.clone()) - .expect_err("Insert voucher should fail"); - - assert!(matches!(voucher_error, Error::DatabaseError { source: _ })); -} - -#[test] -#[serial] -fn test_get_voucher_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let voucher = Voucher { - input_index: 0, - index: 0, - destination: "destination".as_bytes().to_vec(), - payload: "voucher-0-0".as_bytes().to_vec(), - }; - repo.insert_voucher(voucher.clone()) - .expect("Insert voucher should succeed"); - - let voucher_error = - repo.get_voucher(1, 1).expect_err("Get voucher should fail"); - - assert!(matches!( - voucher_error, - Error::ItemNotFound { item_type } if item_type == "voucher" - )); -} - -#[test] -#[serial] -fn test_insert_report() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let report = Report { - input_index: 0, - index: 0, - payload: "report-0-0".as_bytes().to_vec(), - }; - - repo.insert_report(report.clone()) - .expect("Insert report should succeed"); - - let result: Report = test.get_from_sql("Select * from reports"); - - assert_eq!(result, report); -} - -#[test] -#[serial] -fn test_get_report() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let report = Report { - 
input_index: 0, - index: 0, - payload: "report-0-0".as_bytes().to_vec(), - }; - repo.insert_report(report.clone()) - .expect("Insert report should succeed"); - - let get_report = repo.get_report(0, 0).expect("Get report should succeed"); - - assert_eq!(report, get_report); -} - -#[test] -#[serial] -fn test_insert_report_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let report = Report { - input_index: 1, - index: 0, - payload: "report-1-0".as_bytes().to_vec(), - }; - let report_error = repo - .insert_report(report.clone()) - .expect_err("Insert report should fail"); - - assert!(matches!(report_error, Error::DatabaseError { source: _ })); -} - -#[test] -#[serial] -fn test_get_report_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - insert_test_input(&repo); - - let report = Report { - input_index: 0, - index: 0, - payload: "report-0-0".as_bytes().to_vec(), - }; - repo.insert_report(report.clone()) - .expect("Insert report should succeed"); - - let report_error = - repo.get_report(1, 1).expect_err("Get report should fail"); - - assert!(matches!( - report_error, - Error::ItemNotFound { item_type } if item_type == "report" - )); -} - -#[test] -#[serial] -fn test_insert_proof() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let proof = Proof { - input_index: 0, - output_index: 0, - output_enum: rollups_data::OutputEnum::Voucher, - validity_input_index_within_epoch: 0, - validity_output_index_within_input: 0, - validity_output_hashes_root_hash: "".as_bytes().to_vec(), - validity_vouchers_epoch_root_hash: "".as_bytes().to_vec(), - validity_notices_epoch_root_hash: "".as_bytes().to_vec(), - validity_machine_state_hash: "".as_bytes().to_vec(), - validity_output_hash_in_output_hashes_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - validity_output_hashes_in_epoch_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - context: "".as_bytes().to_vec(), - }; - - repo.insert_proof(proof.clone()) - .expect("Insert proof should succeed"); - - let result: Proof = test.get_from_sql("Select * from proofs"); - - assert_eq!(result, proof); -} - -#[test] -#[serial] -fn test_get_proof() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let proof = Proof { - input_index: 0, - output_index: 0, - output_enum: rollups_data::OutputEnum::Voucher, - validity_input_index_within_epoch: 0, - validity_output_index_within_input: 0, - validity_output_hashes_root_hash: "".as_bytes().to_vec(), - validity_vouchers_epoch_root_hash: "".as_bytes().to_vec(), - validity_notices_epoch_root_hash: "".as_bytes().to_vec(), - validity_machine_state_hash: "".as_bytes().to_vec(), - validity_output_hash_in_output_hashes_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - validity_output_hashes_in_epoch_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - context: "".as_bytes().to_vec(), - }; - repo.insert_proof(proof.clone()) - .expect("Insert proof should succeed"); - - let get_proof = repo - .get_proof(0, 0, rollups_data::OutputEnum::Voucher) - .unwrap() - .expect("Get proof should succeed"); - - assert_eq!(proof, get_proof); -} - -#[test] -#[serial] -fn test_get_proof_error() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let proof = Proof { - input_index: 0, - 
output_index: 0, - output_enum: rollups_data::OutputEnum::Voucher, - validity_input_index_within_epoch: 0, - validity_output_index_within_input: 0, - validity_output_hashes_root_hash: "".as_bytes().to_vec(), - validity_vouchers_epoch_root_hash: "".as_bytes().to_vec(), - validity_notices_epoch_root_hash: "".as_bytes().to_vec(), - validity_machine_state_hash: "".as_bytes().to_vec(), - validity_output_hash_in_output_hashes_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - validity_output_hashes_in_epoch_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - context: "".as_bytes().to_vec(), - }; - repo.insert_proof(proof.clone()) - .expect("Insert proof should succeed"); - - let proof_error = repo.get_proof(1, 1, rollups_data::OutputEnum::Voucher); - - match proof_error { - Ok(None) => assert!(true), - Ok(Some(_proof)) => assert!(false), - Err(_) => assert!(false), - } -} - -#[test] -#[serial] -fn test_pagination_macro() { - let docker = Cli::default(); - let test = TestState::setup(&docker); - let repo = test.get_repository(); - - let input0 = create_input(); - - let input1 = Input { - index: 1, - msg_sender: "msg-sender".as_bytes().to_vec(), - tx_hash: "tx-hash".as_bytes().to_vec(), - block_number: 0, - timestamp: UNIX_EPOCH + Duration::from_secs(1676489717), - payload: "input-1".as_bytes().to_vec(), - status: CompletionStatus::Accepted, - }; - - repo.insert_input(input0.clone()) - .expect("Insert input should succeed"); - repo.insert_input(input1.clone()) - .expect("Insert input should succeed"); - - let query_filter = InputQueryFilter { - index_greater_than: Some(-1), - index_lower_than: Some(5), - }; - - let pagination_connection = repo - .get_inputs(Some(5), None, None, None, query_filter) - .expect("The macro should work, creating a pagination connection"); - - assert_eq!( - pagination_connection, - PaginationConnection { - total_count: 2, - edges: vec![ - Edge { - node: input0, - cursor: Cursor::decode("MA==") - .expect("Should create cursor with correct offset"), - }, - Edge { - node: input1, - cursor: Cursor::decode("MQ==") - .expect("Should create cursor with correct offset"), - }, - ], - page_info: PageInfo { - start_cursor: Some( - Cursor::decode("MA==") - .expect("Should create cursor with correct offset") - ), - end_cursor: Some( - Cursor::decode("MQ==") - .expect("Should create cursor with correct offset") - ), - has_next_page: false, - has_previous_page: false, - }, - } - ); -} diff --git a/offchain/data/util/populate.sql b/offchain/data/util/populate.sql deleted file mode 100644 index c1793d4f5..000000000 --- a/offchain/data/util/populate.sql +++ /dev/null @@ -1,40 +0,0 @@ --- insert inputs -INSERT INTO inputs VALUES (0, 'msg-sender', 'tx-hash', 0, current_timestamp, 'input-0'); -INSERT INTO inputs VALUES (1, 'msg-sender', 'tx-hash', 0, current_timestamp, 'input-1'); -INSERT INTO inputs VALUES (2, 'msg-sender', 'tx-hash', 0, current_timestamp, 'input-2'); - --- insert notices -INSERT INTO notices VALUES (0, 0, 'notice-0-0'); -INSERT INTO notices VALUES (0, 1, 'notice-0-1'); -INSERT INTO notices VALUES (0, 2, 'notice-0-2'); -INSERT INTO notices VALUES (1, 0, 'notice-1-0'); -INSERT INTO notices VALUES (1, 1, 'notice-1-1'); -INSERT INTO notices VALUES (1, 2, 'notice-1-2'); -INSERT INTO notices VALUES (2, 0, 'notice-2-0'); -INSERT INTO notices VALUES (2, 1, 'notice-2-1'); -INSERT INTO notices VALUES (2, 2, 'notice-2-2'); - --- insert vouchers -INSERT INTO vouchers VALUES (0, 0, 'destination', 'voucher-0-0'); -INSERT INTO vouchers VALUES (0, 1, 'destination', 'voucher-0-1'); 
-INSERT INTO vouchers VALUES (0, 2, 'destination', 'voucher-0-2'); -INSERT INTO vouchers VALUES (1, 0, 'destination', 'voucher-1-0'); -INSERT INTO vouchers VALUES (1, 1, 'destination', 'voucher-1-1'); -INSERT INTO vouchers VALUES (1, 2, 'destination', 'voucher-1-2'); -INSERT INTO vouchers VALUES (2, 0, 'destination', 'voucher-2-0'); -INSERT INTO vouchers VALUES (2, 1, 'destination', 'voucher-2-1'); -INSERT INTO vouchers VALUES (2, 2, 'destination', 'voucher-2-2'); - --- insert reports -INSERT INTO reports VALUES (0, 0, 'report-0-0'); -INSERT INTO reports VALUES (0, 1, 'report-0-1'); -INSERT INTO reports VALUES (0, 2, 'report-0-2'); -INSERT INTO reports VALUES (1, 0, 'report-1-0'); -INSERT INTO reports VALUES (1, 1, 'report-1-1'); -INSERT INTO reports VALUES (1, 2, 'report-1-2'); -INSERT INTO reports VALUES (2, 0, 'report-2-0'); -INSERT INTO reports VALUES (2, 1, 'report-2-1'); -INSERT INTO reports VALUES (2, 2, 'report-2-2'); - -INSERT INTO proofs VALUES (0, 0, 'voucher', 0, 0, '', '', '', '', ARRAY[''::bytea], ARRAY[''::bytea], ''); -INSERT INTO proofs VALUES (0, 0, 'notice', 0, 0, '', '', '', '', ARRAY[''::bytea], ARRAY[''::bytea], ''); diff --git a/offchain/dispatcher/Cargo.toml b/offchain/dispatcher/Cargo.toml deleted file mode 100644 index 03ef88301..000000000 --- a/offchain/dispatcher/Cargo.toml +++ /dev/null @@ -1,37 +0,0 @@ -[package] -name = "dispatcher" -edition.workspace = true -license.workspace = true -version.workspace = true - -[[bin]] -name = "cartesi-rollups-dispatcher" -path = "src/main.rs" - -[dependencies] -http-server = { path = "../http-server" } -log = { path = "../log" } -rollups-events = { path = "../rollups-events" } -types = { path = "../types" } - -async-trait.workspace = true -backoff = { workspace = true, features = ["tokio"] } -clap = { workspace = true, features = ["derive", "env"] } -eth-state-client-lib.workspace = true -eth-state-fold-types = { workspace = true, features = ["ethers"] } -futures.workspace = true -snafu.workspace = true -tokio = { workspace = true, features = ["sync", "macros", "rt-multi-thread"] } -tokio-stream.workspace = true -tonic.workspace = true -tracing.workspace = true - -[dev-dependencies] -test-fixtures = { path = "../test-fixtures" } - -im = { workspace = true, features = ["serde"] } -rand.workspace = true -redis.workspace = true -serial_test.workspace = true -testcontainers.workspace = true -tracing-test = { workspace = true, features = ["no-env-filter"] } diff --git a/offchain/dispatcher/README.md b/offchain/dispatcher/README.md deleted file mode 100644 index 19fbe8749..000000000 --- a/offchain/dispatcher/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Dispatcher - -This service generates rollups inputs from state changes in the blockchain detected by the state-server. -These inputs are sent to the broker to be eventually used by the advance-runner. 
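The flow described in this README condenses to a small, self-contained sketch. The types below are simplified stand-ins invented for illustration; the real service used the Context, MachineDriver, and BrokerSend items defined in the files removed below.

    // Minimal sketch of the dispatcher flow (hypothetical simplified types;
    // the broker stream is modeled as a plain Vec of events).
    enum BrokerEvent {
        Input(u64),       // index of an advance-state input
        FinishEpoch(u64), // number of inputs sent when the epoch closed
    }

    struct Dispatcher {
        inputs_sent: u64,
        last_input_epoch: Option<u64>,
        last_finished_epoch: Option<u64>,
        genesis_block: u64,
        epoch_length: u64,
        out: Vec<BrokerEvent>,
    }

    impl Dispatcher {
        fn epoch_of(&self, block: u64) -> u64 {
            (block - self.genesis_block) / self.epoch_length
        }

        // React to a new block: enqueue its inputs, then close the current
        // epoch once the chain has moved past the epoch of the last input.
        fn react(&mut self, block: u64, input_blocks: &[u64]) {
            for &b in input_blocks {
                self.maybe_finish_epoch(self.epoch_of(b));
                self.out.push(BrokerEvent::Input(self.inputs_sent));
                self.inputs_sent += 1;
                self.last_input_epoch = Some(self.epoch_of(b));
            }
            self.maybe_finish_epoch(self.epoch_of(block));
        }

        // An epoch is finished only if it is non-empty (its last input is not
        // yet covered by a finish event) and strictly in the past.
        fn maybe_finish_epoch(&mut self, current_epoch: u64) {
            if self.last_input_epoch != self.last_finished_epoch
                && Some(current_epoch) > self.last_input_epoch
            {
                self.out.push(BrokerEvent::FinishEpoch(self.inputs_sent));
                self.last_finished_epoch = self.last_input_epoch;
            }
        }
    }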
diff --git a/offchain/dispatcher/src/config.rs b/offchain/dispatcher/src/config.rs deleted file mode 100644 index eb1f16124..000000000 --- a/offchain/dispatcher/src/config.rs +++ /dev/null @@ -1,98 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; -use eth_state_client_lib::config::{ - Error as SCError, SCConfig, SCEnvCLIConfig, -}; -use http_server::HttpServerConfig; -use log::{LogConfig, LogEnvCliConfig}; -use snafu::{ResultExt, Snafu}; -use types::blockchain_config::{ - BlockchainCLIConfig, BlockchainConfig, BlockchainConfigError, -}; - -use rollups_events::{BrokerCLIConfig, BrokerConfig}; - -#[derive(Parser)] -#[command(name = "rd_config")] -#[command(about = "Configuration for dispatcher")] -pub struct DispatcherEnvCLIConfig { - #[command(flatten)] - pub sc_config: SCEnvCLIConfig, - - #[command(flatten)] - pub broker_config: BrokerCLIConfig, - - #[command(flatten)] - pub log_config: LogEnvCliConfig, - - #[command(flatten)] - pub blockchain_config: BlockchainCLIConfig, - - /// Duration of rollups epoch in blocks, for which dispatcher will make claims. - #[arg(long, env, default_value = "7200")] - pub rd_epoch_length: u64, - - /// Chain ID - #[arg(long, env)] - pub chain_id: u64, -} - -#[derive(Clone, Debug)] -pub struct DispatcherConfig { - pub sc_config: SCConfig, - pub broker_config: BrokerConfig, - pub log_config: LogConfig, - pub blockchain_config: BlockchainConfig, - - pub epoch_length: u64, - pub chain_id: u64, -} - -#[derive(Debug, Snafu)] -pub enum Error { - #[snafu(display("StateClient configuration error"))] - StateClientError { source: SCError }, - - #[snafu(display("Blockchain configuration error"))] - BlockchainError { source: BlockchainConfigError }, -} - -#[derive(Debug)] -pub struct Config { - pub dispatcher_config: DispatcherConfig, - pub http_server_config: HttpServerConfig, -} - -impl Config { - pub fn initialize() -> Result<Self, Error> { - let (http_server_config, dispatcher_config) = - HttpServerConfig::parse::<DispatcherEnvCLIConfig>("dispatcher"); - - let sc_config = SCConfig::initialize(dispatcher_config.sc_config) - .context(StateClientSnafu)?; - - let log_config = LogConfig::initialize(dispatcher_config.log_config); - - let blockchain_config = - BlockchainConfig::try_from(dispatcher_config.blockchain_config) - .context(BlockchainSnafu)?; - - let broker_config = BrokerConfig::from(dispatcher_config.broker_config); - - let dispatcher_config = DispatcherConfig { - sc_config, - broker_config, - log_config, - blockchain_config, - epoch_length: dispatcher_config.rd_epoch_length, - chain_id: dispatcher_config.chain_id, - }; - - Ok(Config { - dispatcher_config, - http_server_config, - }) - } -} diff --git a/offchain/dispatcher/src/dispatcher.rs b/offchain/dispatcher/src/dispatcher.rs deleted file mode 100644 index da5693ae8..000000000 --- a/offchain/dispatcher/src/dispatcher.rs +++ /dev/null @@ -1,168 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use eth_state_client_lib::StateServer; -use eth_state_fold_types::{Block, BlockStreamItem}; -use rollups_events::DAppMetadata; -use std::sync::Arc; -use tokio_stream::StreamExt; -use tracing::{error, instrument, trace, warn}; -use types::foldables::{InputBox, InputBoxInitialState}; - -use crate::{ - config::DispatcherConfig, - drivers::{machine::MachineDriver, Context}, - error::{BrokerSnafu, DispatcherError, StateServerSnafu}, - machine::{rollups_broker::BrokerFacade, BrokerSend}, -
metrics::DispatcherMetrics, - setup::{create_block_subscription, create_context, create_state_server}, -}; - -use snafu::{whatever, ResultExt}; - -#[instrument(level = "trace", skip_all)] -pub async fn start( - config: DispatcherConfig, - metrics: DispatcherMetrics, -) -> Result<(), DispatcherError> { - trace!("Setting up dispatcher"); - - let dapp_metadata = DAppMetadata { - chain_id: config.chain_id, - dapp_address: config.blockchain_config.dapp_address.clone(), - }; - - trace!("Creating state-server connection"); - let state_server = create_state_server(&config.sc_config).await?; - - trace!("Starting block subscription with confirmations"); - let mut block_subscription = create_block_subscription( - &state_server, - config.sc_config.default_confirmations, - ) - .await?; - - trace!("Creating broker connection"); - let broker = - BrokerFacade::new(config.broker_config.clone(), dapp_metadata.clone()) - .await - .context(BrokerSnafu)?; - - trace!("Creating machine driver and blockchain driver"); - let mut machine_driver = MachineDriver::new( - config - .blockchain_config - .dapp_address - .clone() - .into_inner() - .into(), - ); - - let initial_state = InputBoxInitialState { - dapp_address: Arc::new( - config - .blockchain_config - .dapp_address - .clone() - .into_inner() - .into(), - ), - input_box_address: Arc::new( - config - .blockchain_config - .input_box_address - .clone() - .into_inner() - .into(), - ), - }; - - trace!("Creating context"); - let mut context = create_context( - &(config.clone()), - &state_server, - &broker, - dapp_metadata, - metrics, - ) - .await?; - - trace!("Starting dispatcher..."); - loop { - match block_subscription.next().await { - Some(Ok(BlockStreamItem::NewBlock(b))) => { - // Normal operation, react on newest block. - trace!( - "Received block number {} and hash {:?}, parent: {:?}", - b.number, - b.hash, - b.parent_hash - ); - process_block( - &b, - &state_server, - &initial_state, - &mut context, - &mut machine_driver, - &broker, - ) - .await? 
- } - - Some(Ok(BlockStreamItem::Reorg(bs))) => { - error!( - "Deep blockchain reorg of {} blocks; new latest has number {:?}, hash {:?}, and parent {:?}", - bs.len(), - bs.last().map(|b| b.number), - bs.last().map(|b| b.hash), - bs.last().map(|b| b.parent_hash) - ); - error!("Bailing..."); - whatever!("deep blockchain reorg"); - } - - Some(Err(e)) => { - warn!( - "Subscription returned error `{}`; waiting for next block...", - e - ); - } - - None => { - whatever!("subscription closed"); - } - } - } -} - -#[instrument(level = "trace", skip_all)] -#[allow(clippy::too_many_arguments)] -async fn process_block( - block: &Block, - - state_server: &impl StateServer< - InitialState = InputBoxInitialState, - State = InputBox, - >, - initial_state: &InputBoxInitialState, - - context: &mut Context, - machine_driver: &mut MachineDriver, - - broker: &impl BrokerSend, -) -> Result<(), DispatcherError> { - trace!("Querying rollup state"); - let state = state_server - .query_state(initial_state, block.hash) - .await - .context(StateServerSnafu)?; - - // Drive machine - trace!("Reacting to state with `machine_driver`"); - machine_driver - .react(context, &state.block, &state.state, broker) - .await - .context(BrokerSnafu)?; - - Ok(()) -} diff --git a/offchain/dispatcher/src/drivers/context.rs b/offchain/dispatcher/src/drivers/context.rs deleted file mode 100644 index a8063bbcf..000000000 --- a/offchain/dispatcher/src/drivers/context.rs +++ /dev/null @@ -1,689 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::{ - machine::{rollups_broker::BrokerFacadeError, BrokerSend}, - metrics::DispatcherMetrics, -}; - -use rollups_events::DAppMetadata; -use types::foldables::Input; - -#[derive(Debug)] -pub struct Context { - inputs_sent: u64, - last_input_epoch: Option<u64>, - last_finished_epoch: Option<u64>, - - // constants - genesis_block: u64, - epoch_length: u64, - - // metrics - dapp_metadata: DAppMetadata, - metrics: DispatcherMetrics, -} - -impl Context { - pub fn new( - genesis_block: u64, - epoch_length: u64, - dapp_metadata: DAppMetadata, - metrics: DispatcherMetrics, - ) -> Self { - assert!(epoch_length > 0); - Self { - inputs_sent: 0, - last_input_epoch: None, - last_finished_epoch: None, - genesis_block, - epoch_length, - dapp_metadata, - metrics, - } - } - - pub fn inputs_sent(&self) -> u64 { - self.inputs_sent - } - - pub async fn finish_epoch_if_needed( - &mut self, - block_number: u64, - broker: &impl BrokerSend, - ) -> Result<(), BrokerFacadeError> { - let epoch = self.calculate_epoch(block_number); - if self.should_finish_epoch(epoch) { - self.finish_epoch(broker).await?; - } - Ok(()) - } - - pub async fn enqueue_input( - &mut self, - input: &Input, - broker: &impl BrokerSend, - ) -> Result<(), BrokerFacadeError> { - let input_block_number = input.block_added.number.as_u64(); - self.finish_epoch_if_needed(input_block_number, broker) - .await?; - - broker.enqueue_input(self.inputs_sent, input).await?; - - self.metrics - .advance_inputs_sent - .get_or_create(&self.dapp_metadata) - .inc(); - - self.inputs_sent += 1; - - let input_epoch = self.calculate_epoch(input_block_number); - self.last_finished_epoch.map(|last_finished_epoch| { - // Asserting that the calculated epoch comes after the last finished epoch. - // (If last_finished_epoch == None then we don't need the assertion.)
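        // Concrete example (with, say, genesis_block = 0 and epoch_length = 10):
        // once epoch 1 (blocks 10..=19) has been finished, any later input was
        // added at block >= 20, so its calculated epoch is >= 2 and the assertion
        // below holds.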
- assert!(input_epoch > last_finished_epoch) - }); - self.last_input_epoch = Some(input_epoch); - - Ok(()) - } -} - -impl Context { - fn calculate_epoch(&self, block_number: u64) -> u64 { - assert!(block_number >= self.genesis_block); - (block_number - self.genesis_block) / self.epoch_length - } - - fn should_finish_epoch(&self, epoch: u64) -> bool { - // Being (last_input_epoch >= last_finished_epoch) a structural invariant. - // Being the current epoch the epoch of the last input. - // - // If last_finished_epoch is None and last_input_epoch is None, - // then there are no inputs by definition and the current epoch is empty. - // - // If last_finished_epoch is Some(x) and last_input_epoch is Some(x), - // then an epoch was finished and the last enqueued input belongs to that epoch; - // meaning that any subsequent epochs (which includes the current one) are empty. - // - // If last_finished_epoch is Some(x) and last_input_epoch is Some(y), or - // If last_finished_epoch is None and last_input_epoch is Some(_), then - // the current epoch is not empty by definition and by the structural invariant. - // - // The state in which last_finished_epoch is Some(_) and last_input_epoch is None is - // impossible (by the structural invariant). - if self.last_finished_epoch == self.last_input_epoch { - return false; // if the current epoch is empty - } - - if epoch == self.last_input_epoch.unwrap() { - return false; // if the current epoch is still not over - } - - epoch > self.last_finished_epoch.unwrap_or(0) - } - - async fn finish_epoch( - &mut self, - broker: &impl BrokerSend, - ) -> Result<(), BrokerFacadeError> { - // Asserting that there are inputs in the current epoch. - assert!( - match (self.last_input_epoch, self.last_finished_epoch) { - (Some(input_epoch), Some(finished_epoch)) => input_epoch > finished_epoch, - (Some(_), None) => true, // Consider input_epoch greater than None - (None, _) => false, // None is never greater than any value - }, - "Assertion failed: last_input_epoch should be greater than last_finished_epoch" - ); - - broker.finish_epoch(self.inputs_sent).await?; - self.metrics - .finish_epochs_sent - .get_or_create(&self.dapp_metadata) - .inc(); - - self.last_finished_epoch = self.last_input_epoch; - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use std::collections::VecDeque; - - use crate::drivers::mock::Event; - use rollups_events::DAppMetadata; - use serial_test::serial; - - use crate::{drivers::mock, metrics::DispatcherMetrics}; - - use super::Context; - - impl Default for Context { - fn default() -> Self { - Context::new( - /* genesis_block */ 0, - /* epoch_length */ 10, - /* dapp_metadata */ DAppMetadata::default(), - /* metrics */ DispatcherMetrics::default(), - ) - } - } - - // -------------------------------------------------------------------------------------------- - // calculate_epoch - // -------------------------------------------------------------------------------------------- - - #[test] - fn calculate_epoch_with_zero_genesis() { - let mut context = Context::default(); - context.genesis_block = 0; - context.epoch_length = 10; - - let number_of_epochs = 10; - let mut tested = 0; - for current_epoch in 0..number_of_epochs { - let block_lower_bound = current_epoch * context.epoch_length; - let block_upper_bound = (current_epoch + 1) * context.epoch_length; - for i in block_lower_bound..block_upper_bound { - assert_eq!(context.calculate_epoch(i), current_epoch); - tested += 1; - } - } - - assert_eq!(tested, number_of_epochs * context.epoch_length); - 
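    // Boundary check below: block 100 (= epoch_length * number_of_epochs) is the
    // first block of the next epoch; since epoch_length == number_of_epochs == 10
    // here, the expected value is 10 either way.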
assert_eq!( - context.calculate_epoch(context.epoch_length * number_of_epochs), - context.epoch_length - ); - } - - #[test] - fn calculate_epoch_with_offset_genesis() { - let mut context = Context::default(); - context.genesis_block = 2; - context.epoch_length = 2; - - assert_eq!(context.calculate_epoch(2), 0); - assert_eq!(context.calculate_epoch(3), 0); - assert_eq!(context.calculate_epoch(4), 1); - assert_eq!(context.calculate_epoch(5), 1); - assert_eq!(context.calculate_epoch(6), 2); - } - - #[test] - #[should_panic] - fn calculate_epoch_should_panic_because_block_came_before_genesis() { - let mut context = Context::default(); - context.genesis_block = 4; - context.epoch_length = 4; - context.calculate_epoch(2); - } - - // -------------------------------------------------------------------------------------------- - // should_finish_epoch -- first epoch - // -------------------------------------------------------------------------------------------- - - #[test] - fn should_finish_the_first_epoch() { - let mut context = Context::default(); - context.inputs_sent = 1; - context.last_input_epoch = Some(0); - context.last_finished_epoch = None; - let epoch = context.calculate_epoch(10); - assert_eq!(context.should_finish_epoch(epoch), true); - } - - #[test] - fn should_finish_the_first_epoch_after_several_blocks() { - let mut context = Context::default(); - context.inputs_sent = 110; - context.last_input_epoch = Some(9); - context.last_finished_epoch = None; - let epoch = context.calculate_epoch(100); - assert_eq!(context.should_finish_epoch(epoch), true); - } - - #[test] - fn should_not_finish_an_empty_first_epoch() { - let mut context = Context::default(); - context.inputs_sent = 0; - context.last_input_epoch = None; - context.last_finished_epoch = None; - let epoch = context.calculate_epoch(10); - assert_eq!(context.should_finish_epoch(epoch), false); - } - - #[test] - fn should_not_finish_a_very_late_empty_first_epoch() { - let mut context = Context::default(); - context.inputs_sent = 0; - context.last_input_epoch = None; - context.last_finished_epoch = None; - let epoch = context.calculate_epoch(2340); - assert_eq!(context.should_finish_epoch(epoch), false); - } - - #[test] - fn should_not_finish_a_timely_first_epoch() { - let mut context = Context::default(); - context.inputs_sent = 1; - context.last_input_epoch = Some(0); - context.last_finished_epoch = None; - let epoch = context.calculate_epoch(9); - assert_eq!(context.should_finish_epoch(epoch), false); - } - - // -------------------------------------------------------------------------------------------- - // should_finish_epoch -- other epochs - // -------------------------------------------------------------------------------------------- - - #[test] - fn should_finish_epoch() { - let mut context = Context::default(); - context.inputs_sent = 42; - context.last_input_epoch = Some(4); - context.last_finished_epoch = Some(3); - let epoch = context.calculate_epoch(54); - assert_eq!(context.should_finish_epoch(epoch), true); - } - - #[test] - fn should_finish_epoch_by_a_lot() { - let mut context = Context::default(); - context.inputs_sent = 142; - context.last_input_epoch = Some(15); - context.last_finished_epoch = Some(2); - let epoch = context.calculate_epoch(190); - assert_eq!(context.should_finish_epoch(epoch), true); - } - - #[test] - fn should_not_finish_an_empty_epoch() { - let mut context = Context::default(); - context.inputs_sent = 120; - context.last_input_epoch = Some(9); - context.last_finished_epoch = Some(9); - let 
epoch = context.calculate_epoch(105); - assert_eq!(context.should_finish_epoch(epoch), false); - } - - #[test] - fn should_not_finish_a_very_late_empty_epoch() { - let mut context = Context::default(); - context.inputs_sent = 120; - context.last_input_epoch = Some(15); - context.last_finished_epoch = Some(15); - let epoch = context.calculate_epoch(1000); - assert_eq!(context.should_finish_epoch(epoch), false); - } - - #[test] - fn should_not_finish_a_timely_epoch() { - let mut context = Context::default(); - context.inputs_sent = 230; - context.last_input_epoch = Some(11); - context.last_finished_epoch = Some(10); - let epoch = context.calculate_epoch(110); - assert_eq!(context.should_finish_epoch(epoch), false); - } - - // -------------------------------------------------------------------------------------------- - // finish_epoch - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn finish_epoch_ok() { - let mut context = Context::default(); - context.inputs_sent = 1; - context.last_input_epoch = Some(0); - context.last_finished_epoch = None; - - let broker = mock::Broker::new(vec![], vec![]); - let result = context.finish_epoch(&broker).await; - assert!(result.is_ok()); - assert_eq!(context.inputs_sent, 1); - assert_eq!(context.last_input_epoch, Some(0)); - assert_eq!(context.last_finished_epoch, Some(0)); - } - - #[tokio::test] - async fn finish_epoch_broker_error() { - let mut context = Context::default(); - context.inputs_sent = 1; - context.last_input_epoch = Some(0); - context.last_finished_epoch = None; - - let broker = mock::Broker::with_finish_epoch_error(); - let result = context.finish_epoch(&broker).await; - assert!(result.is_err()); - assert_eq!(context.inputs_sent, 1); - assert_eq!(context.last_input_epoch, Some(0)); - assert_eq!(context.last_finished_epoch, None); - } - - // -------------------------------------------------------------------------------------------- - // new - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn new_ok() { - let genesis_block = 42; - let epoch_length = 24; - - let context = Context::new( - genesis_block, - epoch_length, - DAppMetadata::default(), - DispatcherMetrics::default(), - ); - - assert_eq!(context.genesis_block, genesis_block); - assert_eq!(context.epoch_length, epoch_length); - assert_eq!(context.dapp_metadata, DAppMetadata::default()); - } - - #[test] - #[should_panic] - fn new_should_panic_because_epoch_length_is_zero() { - Context::new( - 0, - 0, - DAppMetadata::default(), - DispatcherMetrics::default(), - ); - } - - // -------------------------------------------------------------------------------------------- - // inputs_sent_count - // -------------------------------------------------------------------------------------------- - - #[test] - fn inputs_sent_count() { - let number_of_inputs_sent = 42; - let mut context = Context::default(); - context.inputs_sent = number_of_inputs_sent; - assert_eq!(context.inputs_sent(), number_of_inputs_sent); - } - - // -------------------------------------------------------------------------------------------- - // finish_epoch_if_needed - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn finish_epoch_if_needed_true() { - let mut context = Context::default(); - context.inputs_sent = 9; - context.last_input_epoch = Some(0); - context.last_finished_epoch = None; - - let 
broker = mock::Broker::new(vec![], vec![]); - let result = context.finish_epoch_if_needed(12, &broker).await; - assert!(result.is_ok()); - broker.assert_state(vec![ - Event::FinishEpoch(0), // - ]); - } - - #[tokio::test] - async fn finish_epoch_if_needed_false() { - let mut context = Context::default(); - context.inputs_sent = 9; - context.last_input_epoch = Some(0); - context.last_finished_epoch = None; - - let broker = mock::Broker::new(vec![], vec![]); - let result = context.finish_epoch_if_needed(9, &broker).await; - assert!(result.is_ok()); - broker.assert_state(vec![]); - } - - #[tokio::test] - async fn finish_epoch_if_needed_broker_error() { - let mut context = Context::default(); - context.inputs_sent = 9; - context.last_input_epoch = Some(0); - context.last_finished_epoch = None; - let broker = mock::Broker::with_finish_epoch_error(); - let result = context.finish_epoch_if_needed(28, &broker).await; - assert!(result.is_err()); - } - - // -------------------------------------------------------------------------------------------- - // enqueue_input - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn enqueue_input_ok() { - let number_of_inputs_sent = 42; - let last_input_epoch = Some(1); - let last_finished_epoch = None; - - let mut context = Context::default(); - context.inputs_sent = number_of_inputs_sent; - context.last_input_epoch = last_input_epoch; - context.last_finished_epoch = last_finished_epoch; - - let input = mock::new_input(22); - let broker = mock::Broker::new(vec![], vec![]); - let result = context.enqueue_input(&input, &broker).await; - assert!(result.is_ok()); - - assert_eq!(context.inputs_sent, number_of_inputs_sent + 1); - assert_eq!(context.last_input_epoch, Some(2)); - assert_eq!(context.last_finished_epoch, Some(1)); - - broker.assert_state(vec![ - Event::FinishEpoch(0), - Event::Input(number_of_inputs_sent), - ]); - } - - #[tokio::test] - async fn enqueue_input_broker_error() { - let mut context = Context::default(); - let broker = mock::Broker::with_enqueue_input_error(); - let result = context.enqueue_input(&mock::new_input(82), &broker).await; - assert!(result.is_err()); - } - - // -------------------------------------------------------------------------------------------- - // deterministic behavior - // -------------------------------------------------------------------------------------------- - - #[derive(Clone)] - struct Case { - input_blocks: Vec<u64>, - epoch_length: u64, - last_block: u64, - expected: Vec<Event>, - } - - #[tokio::test] - #[serial] - async fn deterministic_behavior() { - let cases: Vec<Case> = vec![ - Case { - input_blocks: vec![], - epoch_length: 2, - last_block: 100, - expected: vec![], - }, - Case { - input_blocks: vec![0, 1, 4, 5], - epoch_length: 2, - last_block: 10, - expected: vec![ - Event::Input(0), - Event::Input(1), - Event::FinishEpoch(0), - Event::Input(2), - Event::Input(3), - Event::FinishEpoch(1), - ], - }, - Case { - input_blocks: vec![0, 0, 0, 7, 7], - epoch_length: 2, - last_block: 10, - expected: vec![ - Event::Input(0), - Event::Input(1), - Event::Input(2), - Event::FinishEpoch(0), - Event::Input(3), - Event::Input(4), - Event::FinishEpoch(1), - ], - }, - Case { - input_blocks: vec![0, 2], - epoch_length: 2, - last_block: 4, - expected: vec![ - Event::Input(0), - Event::FinishEpoch(0), - Event::Input(1), - Event::FinishEpoch(1), - ], - }, - Case { - input_blocks: vec![1, 2, 4], - epoch_length: 2, - last_block: 6, - expected: vec![ - Event::Input(0), -
Event::FinishEpoch(0), - Event::Input(1), - Event::FinishEpoch(1), - Event::Input(2), - Event::FinishEpoch(2), - ], - }, - Case { - input_blocks: vec![0, 1, 1, 2, 3, 4, 5, 5, 5, 6, 7], - epoch_length: 2, - last_block: 7, - expected: vec![ - Event::Input(0), - Event::Input(1), - Event::Input(2), - Event::FinishEpoch(0), - Event::Input(3), - Event::Input(4), - Event::FinishEpoch(1), - Event::Input(5), - Event::Input(6), - Event::Input(7), - Event::Input(8), - Event::FinishEpoch(2), - Event::Input(9), - Event::Input(10), - ], - }, - Case { - input_blocks: vec![0, 5, 9], - epoch_length: 2, - last_block: 10, - expected: vec![ - Event::Input(0), - Event::FinishEpoch(0), - Event::Input(1), - Event::FinishEpoch(1), - Event::Input(2), - Event::FinishEpoch(2), - ], - }, - ]; - for (i, case) in cases.iter().enumerate() { - println!("Testing case {}.", i); - test_deterministic_case(case.clone()).await; - } - } - - // -------------------------------------------------------------------------------------------- - // auxiliary - // -------------------------------------------------------------------------------------------- - - async fn test_deterministic_case(case: Case) { - let broker1 = create_state_as_inputs_are_being_received( - case.epoch_length, - case.input_blocks.clone(), - case.last_block, - ) - .await; - let broker2 = create_state_by_receiving_all_inputs_at_once( - case.epoch_length, - case.input_blocks.clone(), - case.last_block, - ) - .await; - broker1.assert_state(case.expected.clone()); - broker2.assert_state(case.expected.clone()); - } - - async fn create_state_as_inputs_are_being_received( - epoch_length: u64, - input_blocks: Vec<u64>, - last_block: u64, - ) -> mock::Broker { - println!("================================================"); - println!("one_block_at_a_time:"); - - let mut input_blocks: VecDeque<_> = input_blocks.into(); - let mut current_input_block = input_blocks.pop_front(); - - let mut context = Context::default(); - context.epoch_length = epoch_length; - let broker = mock::Broker::new(vec![], vec![]); - - for block in 0..=last_block { - while let Some(input_block) = current_input_block { - if block == input_block { - println!("\tenqueue_input(input_block: {})", block); - let input = mock::new_input(block); - let result = context.enqueue_input(&input, &broker).await; - assert!(result.is_ok()); - - current_input_block = input_blocks.pop_front(); - } else { - break; - } - } - - println!("\tfinish_epoch_if_needed(block: {})\n", block); - let result = context.finish_epoch_if_needed(block, &broker).await; - assert!(result.is_ok()); - } - - broker - } - - async fn create_state_by_receiving_all_inputs_at_once( - epoch_length: u64, - input_blocks: Vec<u64>, - last_block: u64, - ) -> mock::Broker { - println!("all_inputs_at_once:"); - - let mut context = Context::default(); - context.epoch_length = epoch_length; - let broker = mock::Broker::new(vec![], vec![]); - - for block in input_blocks { - println!("\tenqueue_input(input_block: {})\n", block); - let input = mock::new_input(block); - let result = context.enqueue_input(&input, &broker).await; - assert!(result.is_ok()); - } - - println!("\tfinish_epoch_if_needed(last_block: {})", last_block); - let result = context.finish_epoch_if_needed(last_block, &broker).await; - assert!(result.is_ok()); - - println!("================================================"); - - broker - } -} diff --git a/offchain/dispatcher/src/drivers/machine.rs b/offchain/dispatcher/src/drivers/machine.rs deleted file mode 100644 index f899aae51..000000000 ---
a/offchain/dispatcher/src/drivers/machine.rs +++ /dev/null @@ -1,421 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use super::Context; - -use crate::machine::{rollups_broker::BrokerFacadeError, BrokerSend}; - -use eth_state_fold_types::{ethereum_types::Address, Block}; -use types::foldables::{DAppInputBox, InputBox}; - -use tracing::{debug, instrument}; - -pub struct MachineDriver { - dapp_address: Address, -} - -impl MachineDriver { - pub fn new(dapp_address: Address) -> Self { - Self { dapp_address } - } - - #[instrument(level = "trace", skip_all)] - pub async fn react( - &self, - context: &mut Context, - block: &Block, - input_box: &InputBox, - broker: &impl BrokerSend, - ) -> Result<(), BrokerFacadeError> { - match input_box.dapp_input_boxes.get(&self.dapp_address) { - None => { - debug!("No inputs for dapp {}", self.dapp_address); - } - Some(dapp_input_box) => { - self.process_inputs(context, dapp_input_box, broker).await? - } - }; - - let block = block.number.as_u64(); - context.finish_epoch_if_needed(block, broker).await?; - - Ok(()) - } -} - -impl MachineDriver { - #[instrument(level = "trace", skip_all)] - async fn process_inputs( - &self, - context: &mut Context, - dapp_input_box: &DAppInputBox, - broker: &impl BrokerSend, - ) -> Result<(), BrokerFacadeError> { - tracing::trace!( - "Last input sent to machine manager `{}`, current input `{}`", - context.inputs_sent(), - dapp_input_box.inputs.len() - ); - - let input_slice = - dapp_input_box.inputs.skip(context.inputs_sent() as usize); - - for input in input_slice { - context.enqueue_input(&input, broker).await?; - } - - Ok(()) - } -} - -#[cfg(test)] -mod tests { - use std::sync::Arc; - - use eth_state_fold_types::ethereum_types::H160; - use rollups_events::DAppMetadata; - use types::foldables::InputBox; - - use crate::{ - drivers::{ - machine::MachineDriver, - mock::{self, Broker}, - Context, - }, - machine::RollupStatus, - metrics::DispatcherMetrics, - }; - - fn new_context(genesis_block: u64, epoch_length: u64) -> Context { - let context = Context::new( - genesis_block, - epoch_length, - DAppMetadata::default(), - DispatcherMetrics::default(), - ); - context - } - - fn new_broker(context: &Context) -> Broker { - mock::Broker::new( - vec![RollupStatus { - inputs_sent_count: context.inputs_sent(), - }], - Vec::new(), - ) - } - - // -------------------------------------------------------------------------------------------- - // process_inputs - // -------------------------------------------------------------------------------------------- - - async fn test_process_inputs( - mut context: Context, - broker: Broker, - input_blocks: Vec<u64>, - expected: Vec<mock::Event>, - ) { - let machine_driver = MachineDriver::new(H160::random()); - let dapp_input_box = types::foldables::DAppInputBox { - inputs: input_blocks - .iter() - .map(|block| Arc::new(mock::new_input(*block))) - .collect::<Vec<_>>() - .into(), - }; - let result = machine_driver - .process_inputs(&mut context, &dapp_input_box, &broker) - .await; - assert!(result.is_ok()); - - broker.assert_state(expected); - } - - #[tokio::test] - async fn process_inputs_without_skipping_inputs() { - let context = new_context(0, 10); - let broker = new_broker(&context); - let input_blocks = vec![0, 1, 2, 3]; - let expected = vec![ - mock::Event::Input(0), - mock::Event::Input(1), - mock::Event::Input(2), - mock::Event::Input(3), - ]; - test_process_inputs(context, broker, input_blocks, expected).await; - } - - #[tokio::test] - async fn
process_inputs_with_some_skipped_inputs() { - let mut context = new_context(0, 10); - let mut throwaway_broker = new_broker(&context); - for i in 0..=1 { - assert!(context - .enqueue_input(&mock::new_input(i), &mut throwaway_broker) - .await - .is_ok()); - } - assert_eq!(2, context.inputs_sent()); - - let broker = new_broker(&context); - let input_blocks = vec![0, 1, 2, 3]; - let expected = vec![mock::Event::Input(2), mock::Event::Input(3)]; - test_process_inputs(context, broker, input_blocks, expected).await; - } - - #[tokio::test] - async fn process_inputs_skipping_all_inputs() { - let mut context = new_context(0, 10); - let mut throwaway_broker = new_broker(&context); - for i in 0..=3 { - assert!(context - .enqueue_input(&mock::new_input(i), &mut throwaway_broker) - .await - .is_ok()); - } - assert_eq!(4, context.inputs_sent()); - - let broker = new_broker(&context); - let input_blocks = vec![0, 1, 2, 3]; - let expected = vec![]; - test_process_inputs(context, broker, input_blocks, expected).await; - } - - // -------------------------------------------------------------------------------------------- - // react - // -------------------------------------------------------------------------------------------- - - async fn test_react( - block: u64, - mut context: Context, - broker: Option<Broker>, - input_box: Option<InputBox>, - input_blocks: Vec<u64>, - expected: Vec<mock::Event>, - ) -> (Context, Broker, InputBox) { - let rollup_status = RollupStatus { - inputs_sent_count: context.inputs_sent(), - }; - let broker = broker - .unwrap_or(mock::Broker::new(vec![rollup_status], Vec::new())); - let dapp_address = H160::random(); - let machine_driver = MachineDriver::new(dapp_address); - - let input_box = input_box.unwrap_or(mock::new_input_box()); - let input_box = - mock::update_input_box(input_box, dapp_address, input_blocks); - - let result = machine_driver - .react(&mut context, &mock::new_block(block), &input_box, &broker) - .await; - assert!(result.is_ok()); - - broker.assert_state(expected); - - (context, broker, input_box) - } - - #[tokio::test] - async fn react_without_finish_epoch() { - let block = 3; - let context = new_context(0, 10); - let input_blocks = vec![1, 2]; - let expected = vec![mock::Event::Input(0), mock::Event::Input(1)]; - test_react(block, context, None, None, input_blocks, expected).await; - } - - #[tokio::test] - async fn react_with_finish_epoch() { - let block = 10; - let context = new_context(0, 10); - let input_blocks = vec![1, 2]; - let expected = vec![ - mock::Event::Input(0), - mock::Event::Input(1), - mock::Event::FinishEpoch(0), - ]; - test_react(block, context, None, None, input_blocks, expected).await; - } - - #[tokio::test] - async fn react_with_internal_finish_epoch() { - let block = 14; - let context = new_context(0, 10); - let input_blocks = vec![9, 10]; - let expected = vec![ - mock::Event::Input(0), - mock::Event::FinishEpoch(0), - mock::Event::Input(1), - ]; - test_react(block, context, None, None, input_blocks, expected).await; - } - - #[tokio::test] - async fn react_without_inputs() { - let block = 10; - let context = new_context(0, 10); - let input_blocks = vec![]; - let expected = vec![]; - test_react(block, context, None, None, input_blocks, expected).await; - } - - // NOTE: this test shows we DON'T close the epoch after the first input!
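    // (With epoch_length = 10: epoch 0 covers blocks 0..=9 and receives no
    // inputs, so it is never finished; the inputs at blocks 14, 16, and 18 belong
    // to epoch 1, which is only closed once the input at block 20 lands in epoch
    // 2. That is why FinishEpoch(0) appears after Input(2) and before Input(3).)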
- #[tokio::test] - async fn react_with_inputs_after_first_epoch_length() { - let block = 20; - let context = new_context(0, 10); - let input_blocks = vec![14, 16, 18, 20]; - let expected = vec![ - mock::Event::Input(0), - mock::Event::Input(1), - mock::Event::Input(2), - mock::Event::FinishEpoch(0), - mock::Event::Input(3), - ]; - test_react(block, context, None, None, input_blocks, expected).await; - } - - #[tokio::test] - async fn react_is_deterministic() { - let final_expected = vec![ - mock::Event::Input(0), - mock::Event::FinishEpoch(0), - mock::Event::Input(1), - mock::Event::Input(2), - mock::Event::Input(3), - mock::Event::Input(4), - mock::Event::Input(5), - mock::Event::Input(6), - mock::Event::Input(7), - mock::Event::Input(8), - mock::Event::Input(9), - mock::Event::FinishEpoch(1), - mock::Event::Input(10), - mock::Event::Input(11), - mock::Event::Input(12), - mock::Event::Input(13), - mock::Event::Input(14), - mock::Event::Input(15), - mock::Event::FinishEpoch(2), - mock::Event::Input(16), - mock::Event::Input(17), - mock::Event::Input(18), - ]; - - { - // original - let block1 = 3100; - let block2 = 6944; - - let context = new_context(0, 1000); - - let input_blocks1 = vec![ - 56, // - // - 1078, // - 1091, // - 1159, // - 1204, // - 1227, // - 1280, // - 1298, // - 1442, // - 1637, // - // - 2827, // - 2881, // - 2883, // - 2887, // - 2891, // - 2934, // - ]; - let mut input_blocks2 = input_blocks1.clone(); - input_blocks2.append(&mut vec![ - 6160, // - 6864, // - 6944, // - ]); - - let expected1 = vec![ - mock::Event::Input(0), - mock::Event::FinishEpoch(0), - mock::Event::Input(1), - mock::Event::Input(2), - mock::Event::Input(3), - mock::Event::Input(4), - mock::Event::Input(5), - mock::Event::Input(6), - mock::Event::Input(7), - mock::Event::Input(8), - mock::Event::Input(9), - mock::Event::FinishEpoch(1), - mock::Event::Input(10), - mock::Event::Input(11), - mock::Event::Input(12), - mock::Event::Input(13), - mock::Event::Input(14), - mock::Event::Input(15), - mock::Event::FinishEpoch(2), - ]; - - let (context, broker, input_box) = test_react( - block1, - context, - None, - None, - input_blocks1, - expected1, - ) - .await; - - test_react( - block2, - context, - Some(broker), - Some(input_box), - input_blocks2, - final_expected.clone(), - ) - .await; - } - - { - // reconstruction - let block = 6944; - let context = new_context(0, 1000); - let input_blocks = vec![ - 56, // - // - 1078, // - 1091, // - 1159, // - 1204, // - 1227, // - 1280, // - 1298, // - 1442, // - 1637, // - // - 2827, // - 2881, // - 2883, // - 2887, // - 2891, // - 2934, // - // - 6160, // - 6864, // - 6944, // - ]; - test_react( - block, - context, - None, - None, - input_blocks, - final_expected, - ) - .await; - } - } -} diff --git a/offchain/dispatcher/src/drivers/mock.rs b/offchain/dispatcher/src/drivers/mock.rs deleted file mode 100644 index 951eb743e..000000000 --- a/offchain/dispatcher/src/drivers/mock.rs +++ /dev/null @@ -1,204 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use async_trait::async_trait; -use eth_state_fold_types::{ - ethereum_types::{Address, Bloom, H160, H256}, - Block, -}; -use im::{hashmap, Vector}; -use rollups_events::RollupsClaim; -use snafu::whatever; -use std::{ - collections::VecDeque, - ops::{Deref, DerefMut}, - sync::{ - atomic::{AtomicU16, Ordering}, - Arc, Mutex, - }, -}; -use types::foldables::{DAppInputBox, Input, InputBox}; - -use crate::machine::{ - rollups_broker::BrokerFacadeError, 
BrokerSend, BrokerStatus, RollupStatus, -}; - -// ------------------------------------------------------------------------------------------------ -// auxiliary functions -// ------------------------------------------------------------------------------------------------ - -pub fn new_block(number: u64) -> Block { - Block { - hash: H256::random(), - number: number.into(), - parent_hash: H256::random(), - timestamp: 0.into(), - logs_bloom: Bloom::default(), - } -} - -pub fn new_input(block: u64) -> Input { - Input { - sender: Arc::new(H160::random()), - payload: vec![], - block_added: Arc::new(new_block(block)), - dapp: Arc::new(H160::random()), - tx_hash: Arc::new(H256::default()), - } -} - -pub fn new_input_box() -> InputBox { - InputBox { - dapp_address: Arc::new(H160::random()), - input_box_address: Arc::new(H160::random()), - dapp_input_boxes: Arc::new(hashmap! {}), - } -} - -pub fn update_input_box( - input_box: InputBox, - dapp_address: Address, - blocks: Vec<u64>, -) -> InputBox { - let inputs = blocks - .iter() - .map(|block| Arc::new(new_input(*block))) - .collect::<Vec<_>>(); - let inputs = Vector::from(inputs); - let dapp_input_boxes = input_box - .dapp_input_boxes - .update(Arc::new(dapp_address), Arc::new(DAppInputBox { inputs })); - InputBox { - dapp_address: Arc::new(dapp_address), - input_box_address: input_box.input_box_address, - dapp_input_boxes: Arc::new(dapp_input_boxes), - } -} - -// ------------------------------------------------------------------------------------------------ -// Broker -// ------------------------------------------------------------------------------------------------ - -#[derive(Debug, Clone, Copy, PartialEq)] -pub enum Event { - Input(u64), // input index - FinishEpoch(u64), // index of the "external" epoch -} - -#[derive(Debug)] -pub struct Broker { - pub rollup_statuses: Mutex<VecDeque<RollupStatus>>, - pub next_claims: Mutex<VecDeque<RollupsClaim>>, - pub events: Mutex<Vec<Event>>, - - finish_epochs: AtomicU16, - status_error: bool, - enqueue_input_error: bool, - finish_epoch_error: bool, -} - -impl Broker { - fn default() -> Self { - Self { - rollup_statuses: Mutex::new(VecDeque::new()), - next_claims: Mutex::new(VecDeque::new()), - events: Mutex::new(Vec::new()), - finish_epochs: AtomicU16::new(0), - status_error: false, - enqueue_input_error: false, - finish_epoch_error: false, - } - } - - pub fn new( - rollup_statuses: Vec<RollupStatus>, - next_claims: Vec<RollupsClaim>, - ) -> Self { - let mut broker = Self::default(); - broker.rollup_statuses = Mutex::new(rollup_statuses.into()); - broker.next_claims = Mutex::new(next_claims.into()); - broker - } - - pub fn with_enqueue_input_error() -> Self { - let mut broker = Self::default(); - broker.enqueue_input_error = true; - broker - } - - pub fn with_finish_epoch_error() -> Self { - let mut broker = Self::default(); - broker.finish_epoch_error = true; - broker - } - - fn events_len(&self) -> usize { - let mutex_guard = self.events.lock().unwrap(); - mutex_guard.deref().len() - } - - fn get_event(&self, i: usize) -> Event { - let mutex_guard = self.events.lock().unwrap(); - mutex_guard.deref().get(i).unwrap().clone() - } - - pub fn assert_state(&self, expected: Vec<Event>) { - assert_eq!( - self.events_len(), - expected.len(), - "\n{:?}\n{:?}", - self.events.lock().unwrap().deref(), - expected - ); - println!("Events:"); - for (i, expected) in expected.iter().enumerate() { - let event = self.get_event(i); - println!("index: {:?} => {:?} - {:?}", i, event, expected); - assert_eq!(event, *expected); - } - } -} - -#[async_trait] -impl BrokerStatus for Broker { - async fn status(&self) -> Result<RollupStatus, BrokerFacadeError> { -
if self.status_error { - whatever!("status error") - } else { - let mut mutex_guard = self.rollup_statuses.lock().unwrap(); - Ok(mutex_guard.deref_mut().pop_front().unwrap()) - } - } -} - -#[async_trait] -impl BrokerSend for Broker { - async fn enqueue_input( - &self, - input_index: u64, - _: &Input, - ) -> Result<(), BrokerFacadeError> { - if self.enqueue_input_error { - whatever!("enqueue_input error") - } else { - let mut mutex_guard = self.events.lock().unwrap(); - mutex_guard.deref_mut().push(Event::Input(input_index)); - Ok(()) - } - } - - async fn finish_epoch(&self, _: u64) -> Result<(), BrokerFacadeError> { - if self.finish_epoch_error { - whatever!("finish_epoch error") - } else { - let mut mutex_guard = self.events.lock().unwrap(); - let current_epoch = self.finish_epochs.load(Ordering::SeqCst); - mutex_guard - .deref_mut() - .push(Event::FinishEpoch(current_epoch.into())); - self.finish_epochs - .store(current_epoch + 1, Ordering::SeqCst); - Ok(()) - } - } -} diff --git a/offchain/dispatcher/src/drivers/mod.rs b/offchain/dispatcher/src/drivers/mod.rs deleted file mode 100644 index e40c82b93..000000000 --- a/offchain/dispatcher/src/drivers/mod.rs +++ /dev/null @@ -1,10 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub mod context; -pub mod machine; - -pub use context::Context; - -#[cfg(test)] -mod mock; diff --git a/offchain/dispatcher/src/error.rs b/offchain/dispatcher/src/error.rs deleted file mode 100644 index 2b101ff7c..000000000 --- a/offchain/dispatcher/src/error.rs +++ /dev/null @@ -1,43 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use eth_state_client_lib::error::StateServerError; -use snafu::Snafu; -use std::net::AddrParseError; -use tonic::{codegen::http::uri::InvalidUri, transport::Error as TonicError}; - -use crate::machine; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -pub enum DispatcherError { - #[snafu(display("http server error"))] - HttpServerError { source: std::io::Error }, - - #[snafu(display("metrics address error"))] - MetricsAddressError { source: AddrParseError }, - - #[snafu(display("broker facade error"))] - BrokerError { - source: machine::rollups_broker::BrokerFacadeError, - }, - - #[snafu(display("connection error"))] - ChannelError { source: InvalidUri }, - - #[snafu(display("connection error"))] - ConnectError { source: TonicError }, - - #[snafu(display("state server error"))] - StateServerError { source: StateServerError }, - - #[snafu(display("can't start dispatcher with dirty broker"))] - DirtyBrokerError {}, - - #[snafu(whatever, display("{message}"))] - Whatever { - message: String, - #[snafu(source(from(Box<dyn std::error::Error>, Some)))] - source: Option<Box<dyn std::error::Error>>, - }, -} diff --git a/offchain/dispatcher/src/lib.rs b/offchain/dispatcher/src/lib.rs deleted file mode 100644 index 35926d551..000000000 --- a/offchain/dispatcher/src/lib.rs +++ /dev/null @@ -1,33 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub mod config; -pub mod dispatcher; -pub mod machine; - -mod drivers; -mod error; -mod metrics; -mod setup; - -use config::Config; -use error::DispatcherError; -use metrics::DispatcherMetrics; -use snafu::ResultExt; - -#[tracing::instrument(level = "trace", skip_all)] -pub async fn run(config: Config) -> Result<(), DispatcherError> { - let metrics = DispatcherMetrics::default(); - let dispatcher_handle = -
dispatcher::start(config.dispatcher_config, metrics.clone()); - let http_server_handle = - http_server::start(config.http_server_config, metrics.into()); - tokio::select! { - ret = http_server_handle => { - ret.context(error::HttpServerSnafu) - } - ret = dispatcher_handle => { - ret - } - } -} diff --git a/offchain/dispatcher/src/machine/mod.rs b/offchain/dispatcher/src/machine/mod.rs deleted file mode 100644 index 99a4f2623..000000000 --- a/offchain/dispatcher/src/machine/mod.rs +++ /dev/null @@ -1,33 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub mod rollups_broker; - -use types::foldables::Input; - -use async_trait::async_trait; - -use self::rollups_broker::BrokerFacadeError; - -#[derive(Debug, Clone, Copy, Default)] -pub struct RollupStatus { - pub inputs_sent_count: u64, -} - -#[async_trait] -pub trait BrokerStatus: std::fmt::Debug { - async fn status(&self) -> Result<RollupStatus, BrokerFacadeError>; -} - -#[async_trait] -pub trait BrokerSend: std::fmt::Debug { - async fn enqueue_input( - &self, - input_index: u64, - input: &Input, - ) -> Result<(), BrokerFacadeError>; - async fn finish_epoch( - &self, - inputs_sent_count: u64, - ) -> Result<(), BrokerFacadeError>; -} diff --git a/offchain/dispatcher/src/machine/rollups_broker.rs b/offchain/dispatcher/src/machine/rollups_broker.rs deleted file mode 100644 index a2f729766..000000000 --- a/offchain/dispatcher/src/machine/rollups_broker.rs +++ /dev/null @@ -1,513 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use async_trait::async_trait; -use snafu::{ResultExt, Snafu}; -use tokio::sync::{self, Mutex}; - -use rollups_events::{ - Broker, BrokerConfig, BrokerError, DAppMetadata, Event, InputMetadata, - RollupsAdvanceStateInput, RollupsData, RollupsInput, RollupsInputsStream, - INITIAL_ID, -}; -use types::foldables::Input; - -use super::{BrokerSend, BrokerStatus, RollupStatus}; - -#[derive(Debug, Snafu)] -pub enum BrokerFacadeError { - #[snafu(display("error connecting to the broker"))] - BrokerConnectionError { source: BrokerError }, - - #[snafu(display("error peeking at the end of the stream"))] - PeekInputError { source: BrokerError }, - - #[snafu(display("error producing input event"))] - ProduceInputError { source: BrokerError }, - - #[snafu(display("error producing finish-epoch event"))] - ProduceFinishError { source: BrokerError }, - - #[snafu(whatever, display("{message}"))] - Whatever { - message: String, - #[snafu(source(from(Box<dyn std::error::Error>, Some)))] - source: Option<Box<dyn std::error::Error>>, - }, -} - -#[derive(Debug)] -pub struct BrokerFacade { - broker: Mutex<Broker>, - inputs_stream: RollupsInputsStream, -} - -struct BrokerStreamStatus { - id: String, - epoch_number: u64, - status: RollupStatus, -} - -impl BrokerFacade { - #[tracing::instrument(level = "trace", skip_all)] - pub async fn new( - config: BrokerConfig, - dapp_metadata: DAppMetadata, - ) -> Result<Self, BrokerFacadeError> { - tracing::trace!(?config, "connection to the broker"); - Ok(Self { - broker: Mutex::new( - Broker::new(config).await.context(BrokerConnectionSnafu)?, - ), - inputs_stream: RollupsInputsStream::new(&dapp_metadata), - }) - } - - #[tracing::instrument(level = "trace", skip_all)] - async fn broker_status( - &self, - broker: &mut sync::MutexGuard<'_, Broker>, - ) -> Result<BrokerStreamStatus, BrokerFacadeError> { - let event = self.peek(broker).await?; - - let old_epoch_index = event - .clone() - .map(|event| event.payload.epoch_index) - .unwrap_or(0); - - // The epoch gets incremented inside this ".into()"!
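        // (A FinishEpoch event closes its epoch, so the stream status derived
        // from it reports the next epoch number to whatever event follows.)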
- // Check From<Option<Event<RollupsInput>>> for BrokerStreamStatus - let status: BrokerStreamStatus = event.into(); - - // Asserting that the epoch_index is continuous. - let new_epoch_index = status.epoch_number; - assert!( - new_epoch_index == old_epoch_index - || new_epoch_index == old_epoch_index + 1 - ); - - Ok(status) - } - - #[tracing::instrument(level = "trace", skip_all)] - async fn peek( - &self, - broker: &mut sync::MutexGuard<'_, Broker>, - ) -> Result<Option<Event<RollupsInput>>, BrokerFacadeError> { - tracing::trace!("peeking last produced event"); - let response = broker - .peek_latest(&self.inputs_stream) - .await - .context(PeekInputSnafu)?; - tracing::trace!(?response, "got response"); - - Ok(response) - } -} - -#[async_trait] -impl BrokerStatus for BrokerFacade { - #[tracing::instrument(level = "trace", skip_all)] - async fn status(&self) -> Result<RollupStatus, BrokerFacadeError> { - tracing::trace!("querying broker status"); - let mut broker = self.broker.lock().await; - let status = self.broker_status(&mut broker).await?.status; - tracing::trace!(?status, "returning rollup status"); - Ok(status) - } -} - -macro_rules! input_sanity_check { - ($event:expr, $input_index:expr) => { - assert_eq!($event.inputs_sent_count, $input_index + 1); - assert!(matches!( - $event.data, - RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: InputMetadata { - epoch_index, - .. - }, - .. - }) if epoch_index == 0 - )); - assert!(matches!( - $event.data, - RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: InputMetadata { - input_index, - .. - }, - .. - }) if input_index == $input_index - )); - }; -} - -macro_rules! epoch_sanity_check { - ($event:expr, $inputs_sent_count:expr) => { - assert_eq!($event.inputs_sent_count, $inputs_sent_count); - assert!(matches!($event.data, RollupsData::FinishEpoch { .. })); - }; -} - -#[async_trait] -impl BrokerSend for BrokerFacade { - #[tracing::instrument(level = "trace", skip_all)] - async fn enqueue_input( - &self, - input_index: u64, - input: &Input, - ) -> Result<(), BrokerFacadeError> { - tracing::trace!(?input_index, ?input, "enqueueing input"); - - let mut broker = self.broker.lock().await; - let status = self.broker_status(&mut broker).await?; - - let event = build_next_input(input, &status); - tracing::info!(?event, "producing input event"); - - input_sanity_check!(event, input_index); - - let id = broker - .produce(&self.inputs_stream, event) - .await - .context(ProduceInputSnafu)?; - tracing::trace!(id, "produced event with id"); - - Ok(()) - } - - #[tracing::instrument(level = "trace", skip_all)] - async fn finish_epoch( - &self, - inputs_sent_count: u64, - ) -> Result<(), BrokerFacadeError> { - tracing::info!(?inputs_sent_count, "finishing epoch"); - - let mut broker = self.broker.lock().await; - let status = self.broker_status(&mut broker).await?; - - let event = build_next_finish_epoch(&status); - tracing::trace!(?event, "producing finish epoch event"); - - epoch_sanity_check!(event, inputs_sent_count); - - let id = broker - .produce(&self.inputs_stream, event) - .await - .context(ProduceFinishSnafu)?; - - tracing::trace!(id, "produce event with id"); - - Ok(()) - } -} - -impl From<RollupsInput> for RollupStatus { - fn from(payload: RollupsInput) -> Self { - RollupStatus { - inputs_sent_count: payload.inputs_sent_count, - } - } -} - -impl From<Event<RollupsInput>> for BrokerStreamStatus { - fn from(event: Event<RollupsInput>) -> Self { - let id = event.id; - let payload = event.payload; - let epoch_index = payload.epoch_index; - - match payload.data { - RollupsData::AdvanceStateInput { ..
} => Self { - id, - epoch_number: epoch_index, - status: payload.into(), - }, - - RollupsData::FinishEpoch { .. } => Self { - id, - epoch_number: epoch_index + 1, // Epoch number being incremented! - status: payload.into(), - }, - } - } -} - -impl From>> for BrokerStreamStatus { - fn from(event: Option>) -> Self { - match event { - Some(e) => e.into(), - - None => Self { - id: INITIAL_ID.to_owned(), - epoch_number: 0, - status: RollupStatus::default(), - }, - } - } -} - -fn build_next_input( - input: &Input, - status: &BrokerStreamStatus, -) -> RollupsInput { - let metadata = InputMetadata { - msg_sender: input.sender.to_fixed_bytes().into(), - block_number: input.block_added.number.as_u64(), - timestamp: input.block_added.timestamp.as_u64(), - epoch_index: 0, - input_index: status.status.inputs_sent_count, - }; - - let data = RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata, - payload: input.payload.clone().into(), - tx_hash: input.tx_hash.0.into(), - }); - - RollupsInput { - parent_id: status.id.clone(), - epoch_index: status.epoch_number, - inputs_sent_count: status.status.inputs_sent_count + 1, - data, - } -} - -fn build_next_finish_epoch(status: &BrokerStreamStatus) -> RollupsInput { - RollupsInput { - parent_id: status.id.clone(), - epoch_index: status.epoch_number, - inputs_sent_count: status.status.inputs_sent_count, - data: RollupsData::FinishEpoch {}, - } -} - -#[cfg(test)] -mod broker_facade_tests { - use std::{sync::Arc, time::Duration}; - - use backoff::ExponentialBackoffBuilder; - use eth_state_fold_types::{ - ethereum_types::{Bloom, H160, H256, U256, U64}, - Block, - }; - use rollups_events::{ - BrokerConfig, BrokerEndpoint, DAppMetadata, Hash, InputMetadata, - Payload, RedactedUrl, RollupsAdvanceStateInput, RollupsData, Url, - }; - use test_fixtures::broker::BrokerFixture; - use testcontainers::clients::Cli; - use types::foldables::Input; - - use crate::machine::{ - rollups_broker::BrokerFacadeError, BrokerSend, BrokerStatus, - }; - - use super::BrokerFacade; - - // -------------------------------------------------------------------------------------------- - // new - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn new_ok() { - let docker = Cli::default(); - let (_fixture, _broker) = setup(&docker).await; - } - - #[tokio::test] - async fn new_error() { - let docker = Cli::default(); - let error = failable_setup(&docker, true) - .await - .err() - .expect("'status' function has not failed") - .to_string(); - // BrokerFacadeError::BrokerConnectionError - assert_eq!(error, "error connecting to the broker"); - } - - // -------------------------------------------------------------------------------------------- - // status - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn status_inputs_sent_count_equals_0() { - let docker = Cli::default(); - let (_fixture, broker) = setup(&docker).await; - let status = broker.status().await.expect("'status' function failed"); - assert_eq!(status.inputs_sent_count, 0); - } - - #[tokio::test] - async fn status_inputs_sent_count_equals_1() { - let docker = Cli::default(); - let (fixture, broker) = setup(&docker).await; - produce_advance_state_inputs(&fixture, 1).await; - let status = broker.status().await.expect("'status' function failed"); - assert_eq!(status.inputs_sent_count, 1); - } - - #[tokio::test] - async fn status_inputs_sent_count_equals_10() { - let docker = 
Cli::default(); - let (fixture, broker) = setup(&docker).await; - produce_advance_state_inputs(&fixture, 10).await; - let status = broker.status().await.expect("'status' function failed"); - assert_eq!(status.inputs_sent_count, 10); - } - - // -------------------------------------------------------------------------------------------- - // enqueue_input - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn enqueue_input_ok() { - let docker = Cli::default(); - let (_fixture, broker) = setup(&docker).await; - for i in 0..3 { - assert!(broker - .enqueue_input(i, &new_enqueue_input()) - .await - .is_ok()); - } - } - - #[tokio::test] - #[should_panic( - expected = "assertion `left == right` failed\n left: 1\n right: 6" - )] - async fn enqueue_input_assertion_error_1() { - let docker = Cli::default(); - let (_fixture, broker) = setup(&docker).await; - let _ = broker.enqueue_input(5, &new_enqueue_input()).await; - } - - #[tokio::test] - #[should_panic( - expected = "assertion `left == right` failed\n left: 5\n right: 6" - )] - async fn enqueue_input_assertion_error_2() { - let docker = Cli::default(); - let (_fixture, broker) = setup(&docker).await; - for i in 0..4 { - assert!(broker - .enqueue_input(i, &new_enqueue_input()) - .await - .is_ok()); - } - let _ = broker.enqueue_input(5, &new_enqueue_input()).await; - } - - // NOTE: cannot test result error because the dependency is not injectable. - - // -------------------------------------------------------------------------------------------- - // finish_epoch - // -------------------------------------------------------------------------------------------- - - #[tokio::test] - async fn finish_epoch_ok_1() { - let docker = Cli::default(); - let (_fixture, broker) = setup(&docker).await; - assert!(broker.finish_epoch(0).await.is_ok()); - // BONUS TEST: testing for a finished epoch with no inputs - assert!(broker.finish_epoch(0).await.is_ok()); - } - - #[tokio::test] - async fn finish_epoch_ok_2() { - let docker = Cli::default(); - let (fixture, broker) = setup(&docker).await; - let first_epoch_inputs = 3; - produce_advance_state_inputs(&fixture, first_epoch_inputs).await; - produce_finish_epoch_input(&fixture).await; - let second_epoch_inputs = 7; - produce_advance_state_inputs(&fixture, second_epoch_inputs).await; - let total_inputs = first_epoch_inputs + second_epoch_inputs; - assert!(broker.finish_epoch(total_inputs as u64).await.is_ok()); - } - - #[tokio::test] - #[should_panic( - expected = "assertion `left == right` failed\n left: 0\n right: 1" - )] - async fn finish_epoch_assertion_error() { - let docker = Cli::default(); - let (_fixture, broker) = setup(&docker).await; - let _ = broker.finish_epoch(1).await; - } - - // NOTE: cannot test result error because the dependency is not injectable. 
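[Editorial aside, not part of the patch: the `should_panic` expectations above follow from the bookkeeping in `build_next_input` and `input_sanity_check!`: every produced event carries `inputs_sent_count = current + 1`, which must equal `input_index + 1`, so inputs have to arrive strictly in order. A minimal self-contained sketch of that rule (names here are illustrative, not from the source):]

```
// Sketch only: mirrors the ordering invariant asserted by input_sanity_check!.
fn main() {
    let mut inputs_sent: u64 = 0;
    for input_index in 0..3u64 {
        let event_count = inputs_sent + 1; // what build_next_input produces
        assert_eq!(event_count, input_index + 1); // the input_sanity_check! assertion
        inputs_sent = event_count;
    }
    // Enqueueing input_index 5 next would yield event_count 4 != 6, the same
    // kind of mismatch the enqueue_input_assertion_error tests expect to panic on.
}
```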
- - // -------------------------------------------------------------------------------------------- - // auxiliary - // -------------------------------------------------------------------------------------------- - - async fn failable_setup( - docker: &Cli, - should_fail: bool, - ) -> Result<(BrokerFixture, BrokerFacade), BrokerFacadeError> { - let fixture = BrokerFixture::setup(docker).await; - let redis_endpoint = if should_fail { - BrokerEndpoint::Single(RedactedUrl::new( - Url::parse("https://invalid.com").unwrap(), - )) - } else { - fixture.redis_endpoint().clone() - }; - let config = BrokerConfig { - redis_endpoint, - consume_timeout: 300000, - backoff: ExponentialBackoffBuilder::new() - .with_initial_interval(Duration::from_millis(1000)) - .with_max_elapsed_time(Some(Duration::from_millis(3000))) - .build(), - }; - let metadata = DAppMetadata { - chain_id: fixture.chain_id(), - dapp_address: fixture.dapp_address().clone(), - }; - let broker = BrokerFacade::new(config, metadata).await?; - Ok((fixture, broker)) - } - - async fn setup(docker: &Cli) -> (BrokerFixture, BrokerFacade) { - failable_setup(docker, false).await.unwrap() - } - - fn new_enqueue_input() -> Input { - Input { - sender: Arc::new(H160::random()), - payload: vec![], - block_added: Arc::new(Block { - hash: H256::random(), - number: U64::zero(), - parent_hash: H256::random(), - timestamp: U256::zero(), - logs_bloom: Bloom::default(), - }), - dapp: Arc::new(H160::random()), - tx_hash: Arc::new(H256::random()), - } - } - - async fn produce_advance_state_inputs(fixture: &BrokerFixture<'_>, n: u32) { - for _ in 0..n { - let _ = fixture - .produce_input_event(RollupsData::AdvanceStateInput( - RollupsAdvanceStateInput { - metadata: InputMetadata::default(), - payload: Payload::default(), - tx_hash: Hash::default(), - }, - )) - .await; - } - } - - async fn produce_finish_epoch_input(fixture: &BrokerFixture<'_>) { - let _ = fixture - .produce_input_event(RollupsData::FinishEpoch {}) - .await; - } -} diff --git a/offchain/dispatcher/src/main.rs b/offchain/dispatcher/src/main.rs deleted file mode 100644 index 2ef8247e3..000000000 --- a/offchain/dispatcher/src/main.rs +++ /dev/null @@ -1,15 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -// NOTE: doesn't support History upgradability. -// NOTE: doesn't support changing epoch_duration in the middle of things. 
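[Editorial aside, not part of the patch: the epoch numbering derived by the `From<...> for BrokerStreamStatus` conversions in the deleted rollups_broker.rs above can be summarized as: an empty stream is in epoch 0, an input event stays in its epoch, and a `FinishEpoch` event moves the stream to the next epoch. A small sketch of that rule, under those assumptions:]

```
// Sketch only: the epoch rule behind From<...> for BrokerStreamStatus.
enum LastEvent {
    None,                        // no event produced yet
    AdvanceState { epoch: u64 }, // last event was an input
    FinishEpoch { epoch: u64 },  // last event closed an epoch
}

fn current_epoch(last: LastEvent) -> u64 {
    match last {
        LastEvent::None => 0,
        LastEvent::AdvanceState { epoch } => epoch,
        // "Epoch number being incremented!": a FinishEpoch closes its epoch,
        // so the stream is already in the next one.
        LastEvent::FinishEpoch { epoch } => epoch + 1,
    }
}

fn main() {
    assert_eq!(current_epoch(LastEvent::None), 0);
    assert_eq!(current_epoch(LastEvent::AdvanceState { epoch: 2 }), 2);
    assert_eq!(current_epoch(LastEvent::FinishEpoch { epoch: 2 }), 3);
}
```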
-#[tokio::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let config = dispatcher::config::Config::initialize()?;
-
-    log::configure(&config.dispatcher_config.log_config);
-
-    log::log_service_start(&config, "Dispatcher");
-
-    dispatcher::run(config).await.map_err(|e| e.into())
-}
diff --git a/offchain/dispatcher/src/metrics.rs b/offchain/dispatcher/src/metrics.rs
deleted file mode 100644
index 332e35938..000000000
--- a/offchain/dispatcher/src/metrics.rs
+++ /dev/null
@@ -1,40 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use http_server::{CounterRef, FamilyRef, Registry};
-use rollups_events::DAppMetadata;
-
-const METRICS_PREFIX: &str = "cartesi_rollups_dispatcher";
-
-fn prefixed_metrics(name: &str) -> String {
-    format!("{}_{}", METRICS_PREFIX, name)
-}
-
-#[derive(Debug, Clone, Default)]
-pub struct DispatcherMetrics {
-    pub claims_sent: FamilyRef<DAppMetadata, CounterRef>,
-    pub advance_inputs_sent: FamilyRef<DAppMetadata, CounterRef>,
-    pub finish_epochs_sent: FamilyRef<DAppMetadata, CounterRef>,
-}
-
-impl From<DispatcherMetrics> for Registry {
-    fn from(metrics: DispatcherMetrics) -> Self {
-        let mut registry = Registry::default();
-        registry.register(
-            prefixed_metrics("claims_sent"),
-            "Counts the number of claims sent",
-            metrics.claims_sent,
-        );
-        registry.register(
-            prefixed_metrics("advance_inputs_sent"),
-            "Counts the number of <advance_input>s sent",
-            metrics.advance_inputs_sent,
-        );
-        registry.register(
-            prefixed_metrics("finish_epochs_sent"),
-            "Counts the number of <finish_epoch>s sent",
-            metrics.finish_epochs_sent,
-        );
-        registry
-    }
-}
diff --git a/offchain/dispatcher/src/setup.rs b/offchain/dispatcher/src/setup.rs
deleted file mode 100644
index c95d02bc0..000000000
--- a/offchain/dispatcher/src/setup.rs
+++ /dev/null
@@ -1,106 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use eth_state_client_lib::{
-    config::SCConfig, error::StateServerError, BlockServer,
-    GrpcStateFoldClient, StateServer,
-};
-use eth_state_fold_types::{ethereum_types::U64, BlockStreamItem};
-use rollups_events::DAppMetadata;
-use snafu::{ensure, ResultExt};
-use tokio_stream::{Stream, StreamExt};
-use tonic::transport::Channel;
-use types::foldables::{InputBox, InputBoxInitialState};
-
-use crate::{
-    config::DispatcherConfig,
-    drivers::Context,
-    error::{
-        BrokerSnafu, ChannelSnafu, ConnectSnafu, DirtyBrokerSnafu,
-        DispatcherError, StateServerSnafu,
-    },
-    machine::BrokerStatus,
-    metrics::DispatcherMetrics,
-};
-
-const BUFFER_LEN: usize = 256;
-
-pub async fn create_state_server(
-    config: &SCConfig,
-) -> Result<
-    impl StateServer<InitialState = InputBoxInitialState, State = InputBox>
-        + BlockServer,
-    DispatcherError,
-> {
-    let channel = Channel::from_shared(config.grpc_endpoint.to_owned())
-        .context(ChannelSnafu)?
-        .connect()
-        .await
-        .context(ConnectSnafu)?;
-
-    Ok(GrpcStateFoldClient::new_from_channel(channel, config))
-}
-
-pub async fn create_block_subscription(
-    client: &impl BlockServer,
-    confirmations: usize,
-) -> Result<
-    impl Stream<Item = Result<BlockStreamItem, StateServerError>>
-        + Send
-        + std::marker::Unpin,
-    DispatcherError,
-> {
-    let s = client
-        .subscribe_blocks(confirmations)
-        .await
-        .context(StateServerSnafu)?;
-
-    let s = {
-        use futures::StreamExt;
-        s.ready_chunks(BUFFER_LEN)
-    };
-
-    let s = s.filter_map(
-        |mut x| {
-            if x.len() == BUFFER_LEN {
-                None
-            } else {
-                x.pop()
-            }
-        },
-    );
-
-    Ok(s)
-}
-
-pub async fn create_context(
-    config: &DispatcherConfig,
-    block_server: &impl BlockServer,
-    broker: &impl BrokerStatus,
-    dapp_metadata: DAppMetadata,
-    metrics: DispatcherMetrics,
-) -> Result<Context, DispatcherError> {
-    let input_box_deployment_block_number =
-        U64::from(config.blockchain_config.input_box_deployment_block_number);
-    let genesis_block = block_server
-        .query_block(input_box_deployment_block_number)
-        .await
-        .context(StateServerSnafu)?
-        .number
-        .as_u64();
-
-    let status = broker.status().await.context(BrokerSnafu)?;
-
-    // The dispatcher doesn't work properly if there are inputs in the broker
-    // from a previous run. Hence, we make sure that the broker is in a clean
-    // state before starting.
-    ensure!(status.inputs_sent_count == 0, DirtyBrokerSnafu);
-
-    let context = Context::new(
-        genesis_block,
-        config.epoch_length,
-        dapp_metadata,
-        metrics,
-    );
-
-    Ok(context)
-}
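[Editorial aside, not part of the patch: `create_block_subscription` above batches ready blocks with `ready_chunks(BUFFER_LEN)` and then keeps only the newest block of each batch, discarding full batches outright (presumably a full batch signals the consumer lagged behind). The selection rule in isolation, as a sketch:]

```
// Sketch only: the chunk-filtering rule used in create_block_subscription.
fn keep_latest<T>(mut chunk: Vec<T>, buffer_len: usize) -> Option<T> {
    if chunk.len() == buffer_len {
        None // full buffer: drop the whole burst
    } else {
        chunk.pop() // otherwise keep only the newest item
    }
}

fn main() {
    let full: Vec<u64> = (0..256).collect();
    assert_eq!(keep_latest(full, 256), None);
    assert_eq!(keep_latest(vec![10, 11, 12], 256), Some(12));
}
```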
diff --git a/offchain/graphql-server/Cargo.toml b/offchain/graphql-server/Cargo.toml
deleted file mode 100644
index a75da1767..000000000
--- a/offchain/graphql-server/Cargo.toml
+++ /dev/null
@@ -1,36 +0,0 @@
-[package]
-name = "graphql-server"
-edition.workspace = true
-license.workspace = true
-version.workspace = true
-
-[[bin]]
-name = "cartesi-rollups-graphql-server"
-path = "src/main.rs"
-
-[[bin]]
-name = "generate-schema"
-path = "src/schema/generate_schema.rs"
-
-[dependencies]
-http-health-check = { path = "../http-health-check" }
-log = { path = "../log" }
-rollups-data = { path = "../data" }
-
-actix-cors.workspace = true
-actix-web.workspace = true
-clap = { workspace = true, features = ["derive", "env"] }
-hex.workspace = true
-juniper.workspace = true
-serde = { workspace = true, features = ["derive"] }
-serde_json.workspace = true
-snafu.workspace = true
-tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] }
-tracing.workspace = true
-
-[dev-dependencies]
-test-fixtures = { path = "../test-fixtures" }
-
-awc.workspace = true
-serial_test.workspace = true
-testcontainers.workspace = true
diff --git a/offchain/graphql-server/README.md b/offchain/graphql-server/README.md
deleted file mode 100644
index 86896eb6e..000000000
--- a/offchain/graphql-server/README.md
+++ /dev/null
@@ -1,20 +0,0 @@
-# GraphQL Server
-
-This service exposes a GraphQL endpoint for easy querying of Rollups data.
-
-## Generating GraphQL Schema
-
-Run the following command to generate the GraphQL schema based on the Rust code:
-
-```
-cargo run --bin generate-schema
-```
-
-## Running
-
-To run the GraphQL server locally, you need to set up a PostgreSQL database as described in the data crate [README.md](../data/README.md).
-Then, run the following command:
-
-```
-cargo run --bin cartesi-rollups-graphql-server -- --postgres-password pw
-```
diff --git a/offchain/graphql-server/src/config.rs b/offchain/graphql-server/src/config.rs
deleted file mode 100644
index 6a7bbaea3..000000000
--- a/offchain/graphql-server/src/config.rs
+++ /dev/null
@@ -1,48 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use clap::Parser;
-use log::{LogConfig, LogEnvCliConfig};
-use rollups_data::{RepositoryCLIConfig, RepositoryConfig};
-
-#[derive(Debug)]
-pub struct GraphQLConfig {
-    pub repository_config: RepositoryConfig,
-    pub log_config: LogConfig,
-    pub graphql_host: String,
-    pub graphql_port: u16,
-    pub healthcheck_port: u16,
-}
-
-#[derive(Parser)]
-#[command(name = "graphql_server_config")]
-#[command(about = "Configuration for graphql-server")]
-pub struct CLIConfig {
-    #[command(flatten)]
-    repository_config: RepositoryCLIConfig,
-
-    #[command(flatten)]
-    pub log_config: LogEnvCliConfig,
-
-    #[arg(long, env, default_value = "127.0.0.1")]
-    pub graphql_host: String,
-
-    #[arg(long, env, default_value_t = 4000)]
-    pub graphql_port: u16,
-
-    /// Port of health check
-    #[arg(long, env = "GRAPHQL_HEALTHCHECK_PORT", default_value_t = 8080)]
-    pub healthcheck_port: u16,
-}
-
-impl From<CLIConfig> for GraphQLConfig {
-    fn from(cli_config: CLIConfig) -> Self {
-        Self {
-            repository_config: cli_config.repository_config.into(),
-            log_config: cli_config.log_config.into(),
-            graphql_host: cli_config.graphql_host,
-            graphql_port: cli_config.graphql_port,
-            healthcheck_port: cli_config.healthcheck_port,
-        }
-    }
-}
diff --git a/offchain/graphql-server/src/error.rs b/offchain/graphql-server/src/error.rs
deleted file mode 100644
index 2d87932b2..000000000
--- a/offchain/graphql-server/src/error.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use snafu::Snafu;
-
-#[derive(Debug, Snafu)]
-#[snafu(visibility(pub(crate)))]
-pub enum GraphQLServerError {
-    #[snafu(display("health check error"))]
-    HealthCheckError {
-        source: http_health_check::HealthCheckError,
-    },
-
-    #[snafu(display("server error"))]
-    ServerError { source: std::io::Error },
-}
diff --git a/offchain/graphql-server/src/http.rs b/offchain/graphql-server/src/http.rs
deleted file mode 100644
index a079ed8a8..000000000
--- a/offchain/graphql-server/src/http.rs
+++ /dev/null
@@ -1,95 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use crate::schema::{Context, Query, RollupsGraphQLScalarValue, Schema};
-use actix_cors::Cors;
-use actix_web::dev::Server;
-use actix_web::{
-    middleware::Logger, web, web::Data, App, HttpResponse, HttpServer,
-    Responder,
-};
-use juniper::http::playground::playground_source;
-use juniper::http::GraphQLRequest;
-use juniper::{EmptyMutation, EmptySubscription};
-use std::sync::Arc;
-
-struct HttpContext {
-    schema: Arc<Schema>,
-    context: Context,
-}
-
-pub fn start_service(
-    host: &str,
-    port: u16,
-    context: Context,
-) -> std::io::Result<Server> {
-    Ok(HttpServer::new(move || {
-        let schema = std::sync::Arc::new(Schema::new_with_scalar_value(
-            Query,
-            EmptyMutation::new(),
-            EmptySubscription::new(),
-        ));
-
-        let http_context = HttpContext {
-            schema: schema.clone(),
-            context: context.clone(),
-        };
-
-        let cors = Cors::permissive();
-
-        App::new()
-            .app_data(Data::new(http_context))
-            .wrap(Logger::default())
-            .wrap(cors)
-            .service(graphql)
-            .service(juniper_playground)
-    })
-    .bind((host, port))?
-    .run())
-}
-
-#[actix_web::get("/graphql")]
-async fn juniper_playground() -> impl Responder {
-    let html = playground_source("", None);
-    HttpResponse::Ok()
-        .content_type("text/html; charset=utf-8")
-        .body(html)
-}
-
-#[actix_web::post("/graphql")]
-async fn graphql(
-    query: web::Json<GraphQLRequest<RollupsGraphQLScalarValue>>,
-    http_context: web::Data<HttpContext>,
-) -> HttpResponse {
-    // Execute resolvers in a blocking thread as there are a lot of blocking
-    // diesel db operations
-    let query = Arc::new(query);
-    let return_value: HttpResponse = match tokio::task::spawn_blocking(
-        move || {
-            let res =
-                query.execute_sync(&http_context.schema, &http_context.context);
-            serde_json::to_string(&res)
-        },
-    )
-    .await
-    {
-        Ok(value) => match value {
-            Ok(value) => HttpResponse::Ok()
-                .content_type("application/json")
-                .body(value),
-            Err(err) => {
-                let error_message = format!(
-                    "unable to execute query, internal server error, details: {}",
-                    err
-                );
-                return HttpResponse::InternalServerError().body(error_message);
-            }
-        },
-        Err(err) => {
-            let error_message = format!(
-                "unable to execute query, internal server error, details: {}",
-                err
-            );
-            return HttpResponse::InternalServerError().body(error_message);
-        }
-    };
-    return_value
-}
diff --git a/offchain/graphql-server/src/lib.rs b/offchain/graphql-server/src/lib.rs
deleted file mode 100644
index 8e476d82f..000000000
--- a/offchain/graphql-server/src/lib.rs
+++ /dev/null
@@ -1,35 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use snafu::ResultExt;
-
-pub use config::{CLIConfig, GraphQLConfig};
-pub use error::GraphQLServerError;
-pub use http::start_service;
-pub use schema::Context;
-
-pub mod config;
-mod error;
-pub mod http;
-pub mod schema;
-
-#[tracing::instrument(level = "trace", skip_all)]
-pub async fn run(config: GraphQLConfig) -> Result<(), GraphQLServerError> {
-    let repository = rollups_data::Repository::new(config.repository_config)
-        .expect("failed to connect to database");
-    let context = Context::new(repository);
-    let service_handler =
-        start_service(&config.graphql_host, config.graphql_port, context)
-            .expect("failed to create server");
-
-    let health_handle = http_health_check::start(config.healthcheck_port);
-
-    tokio::select! {
-        ret = health_handle => {
-            ret.context(error::HealthCheckSnafu)
-        }
-        ret = service_handler => {
-            ret.context(error::ServerSnafu)
-        }
-    }
-}
diff --git a/offchain/graphql-server/src/main.rs b/offchain/graphql-server/src/main.rs
deleted file mode 100644
index 9ac649376..000000000
--- a/offchain/graphql-server/src/main.rs
+++ /dev/null
@@ -1,17 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use clap::Parser;
-
-use graphql_server::{CLIConfig, GraphQLConfig};
-
-#[actix_web::main]
-async fn main() -> Result<(), Box<dyn std::error::Error>> {
-    let config: GraphQLConfig = CLIConfig::parse().into();
-
-    log::configure(&config.log_config);
-
-    log::log_service_start(&config, "GraphQL Server");
-
-    graphql_server::run(config).await.map_err(|e| e.into())
-}
diff --git a/offchain/graphql-server/src/schema/generate_schema.rs b/offchain/graphql-server/src/schema/generate_schema.rs
deleted file mode 100644
index 14b6c7b1e..000000000
--- a/offchain/graphql-server/src/schema/generate_schema.rs
+++ /dev/null
@@ -1,30 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-// Note: it was impractical to generate the schema with build.rs, because it
-// is executed before the crate is built, and many structures/entities from
-// the graphql module must be used to generate the schema.
-
-use juniper::{EmptyMutation, EmptySubscription};
-use std::fs::File;
-use std::io::Write;
-
-use graphql_server::schema::{Query, Schema};
-
-const GRAPHQL_SCHEMA_FILE: &str = "schema.graphql";
-
-fn main() {
-    let schema = Schema::new_with_scalar_value(
-        Query {},
-        EmptyMutation::new(),
-        EmptySubscription::new(),
-    );
-    let graphql_schema = schema.as_schema_language();
-    let mut graphql_schema_file = File::create(GRAPHQL_SCHEMA_FILE).unwrap();
-    match write!(graphql_schema_file, "{}", graphql_schema) {
-        Ok(_) => {}
-        Err(e) => {
-            eprintln!("Error writing schema to file: {}", e);
-        }
-    }
-}
diff --git a/offchain/graphql-server/src/schema/mod.rs b/offchain/graphql-server/src/schema/mod.rs
deleted file mode 100644
index ff5e51dad..000000000
--- a/offchain/graphql-server/src/schema/mod.rs
+++ /dev/null
@@ -1,16 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-mod resolvers;
-mod scalar;
-
-pub use resolvers::{Context, Query};
-pub use scalar::RollupsGraphQLScalarValue;
-
-pub type Schema = juniper::RootNode<
-    'static,
-    Query,
-    juniper::EmptyMutation<Context>,
-    juniper::EmptySubscription<Context>,
-    RollupsGraphQLScalarValue,
->;
diff --git a/offchain/graphql-server/src/schema/resolvers.rs b/offchain/graphql-server/src/schema/resolvers.rs
deleted file mode 100644
index 44b1d1c07..000000000
--- a/offchain/graphql-server/src/schema/resolvers.rs
+++ /dev/null
@@ -1,742 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use juniper::{
-    graphql_object, DefaultScalarValue, FieldError, FieldResult, GraphQLEnum,
-    GraphQLInputObject, GraphQLObject,
-};
-use std::time::UNIX_EPOCH;
-
-use rollups_data::Repository;
-use rollups_data::{
-    CompletionStatus as DbCompletionStatus, Connection, Edge, Input,
-    InputQueryFilter, Notice, NoticeQueryFilter, OutputEnum,
-    PageInfo as DbPageInfo, Proof, Report, ReportQueryFilter, Voucher,
-    VoucherQueryFilter,
-};
-
-use super::scalar::RollupsGraphQLScalarValue;
-
-#[derive(Clone)]
-pub struct Context {
-    repository: Repository,
-}
-
-impl Context {
-    pub fn new(repository: Repository) -> Self {
-        Self { repository }
-    }
-}
-
-impl juniper::Context for Context {}
-
-pub struct Query;
-
-#[graphql_object(
-    context = Context,
-    Scalar = RollupsGraphQLScalarValue,
-    description = "Top level queries"
-)]
-impl Query {
-    #[graphql(description = "Get input based on its identifier")]
-    fn input(
-        #[graphql(description = "Input index")] index: i32,
-    ) -> FieldResult<Input> {
-        executor
-            .context()
-            .repository
-            .get_input(index)
-            .map_err(convert_error)
-    }
-
-    #[graphql(description = "Get voucher based on its index")]
-    fn voucher(
-        #[graphql(description = "Voucher index in input")] voucher_index: i32,
-        #[graphql(description = "Input index")] input_index: i32,
-    ) -> FieldResult<Voucher> {
-        executor
-            .context()
-            .repository
-            .get_voucher(voucher_index, input_index)
-            .map_err(convert_error)
-    }
-
-    #[graphql(description = "Get notice based on its index")]
-    fn notice(
-        #[graphql(description = "Notice index in input")] notice_index: i32,
-        #[graphql(description = "Input index")] input_index: i32,
-    ) -> FieldResult<Notice> {
-        executor
-            .context()
-            .repository
-            .get_notice(notice_index, input_index)
-            .map_err(convert_error)
-    }
-
-    #[graphql(description = "Get report based on its index")]
-    fn report(
-        #[graphql(description = "Report index in input")] report_index: i32,
-        #[graphql(description = "Input index")] input_index: i32,
-    ) -> FieldResult<Report> {
-        executor
-            .context()
-            .repository
-            .get_report(report_index, input_index)
-            .map_err(convert_error)
-    }
-
-    #[graphql(description = "Get inputs with support for pagination")]
-    fn inputs(
-        #[graphql(
-            description = "Get at most the first `n` entries (forward pagination)"
-        )]
-        first: Option<i32>,
-        #[graphql(
-            description = "Get at most the last `n` entries (backward pagination)"
-        )]
-        last: Option<i32>,
-        #[graphql(
-            description = "Get entries that come after the provided cursor (forward pagination)"
-        )]
-        after: Option<String>,
-        #[graphql(
-            description = "Get entries that come before the provided cursor (backward pagination)"
-        )]
-        before: Option<String>,
-        #[graphql(description = "Filter entries to retrieve")] r#where: Option<
-            InputFilter,
-        >,
-    ) -> FieldResult<Connection<Input>> {
-        let filter = r#where.map(InputFilter::into).unwrap_or_default();
-        executor
-            .context()
-            .repository
-            .get_inputs(first, last, after, before, filter)
-            .map_err(convert_error)
-    }
-
-    #[graphql(description = "Get vouchers with support for pagination")]
-    fn vouchers(
-        #[graphql(
-            description = "Get at most the first `n` entries (forward pagination)"
-        )]
-        first: Option<i32>,
-        #[graphql(
-            description = "Get at most the last `n` entries (backward pagination)"
-        )]
-        last: Option<i32>,
-        #[graphql(
-            description = "Get entries that come after the provided cursor (forward pagination)"
-        )]
-        after: Option<String>,
-        #[graphql(
-            description = "Get entries that come before the provided cursor (backward pagination)"
-        )]
-        before: Option<String>,
-    ) -> FieldResult<Connection<Voucher>> {
-        executor
-            .context()
-            .repository
-            .get_vouchers(first, last, after, before, Default::default())
-            .map_err(convert_error)
-    }
-
-    #[graphql(description = "Get notices with support for pagination")]
-    fn notices(
-        #[graphql(
-            description = "Get at most the first `n` entries (forward pagination)"
-        )]
-        first: Option<i32>,
-        #[graphql(
-            description = "Get at most the last `n` entries (backward pagination)"
-        )]
-        last: Option<i32>,
-        #[graphql(
-            description = "Get entries that come after the provided cursor (forward pagination)"
-        )]
-        after: Option<String>,
-        #[graphql(
-            description = "Get entries that come before the provided cursor (backward pagination)"
-        )]
-        before: Option<String>,
-    ) -> FieldResult<Connection<Notice>> {
-        executor
-            .context()
-            .repository
-            .get_notices(first, last, after, before, Default::default())
-            .map_err(convert_error)
-    }
(backward pagination)" - )] - before: Option, - ) -> FieldResult> { - executor - .context() - .repository - .get_notices(first, last, after, before, Default::default()) - .map_err(convert_error) - } - - #[graphql(description = "Get reports with support for pagination")] - fn reports( - #[graphql( - description = "Get at most the first `n` entries (forward pagination)" - )] - first: Option, - #[graphql( - description = "Get at most the last `n` entries (backward pagination)" - )] - last: Option, - #[graphql( - description = "Get entries that come after the provided cursor (forward pagination)" - )] - after: Option, - #[graphql( - description = "Get entries that come before the provided cursor (backward pagination)" - )] - before: Option, - ) -> FieldResult> { - executor - .context() - .repository - .get_reports(first, last, after, before, Default::default()) - .map_err(convert_error) - } -} - -#[derive(GraphQLEnum)] -enum CompletionStatus { - Unprocessed, - Accepted, - Rejected, - Exception, - MachineHalted, - CycleLimitExceeded, - TimeLimitExceeded, - PayloadLengthLimitExceeded, -} - -impl From for CompletionStatus { - fn from(status: DbCompletionStatus) -> CompletionStatus { - match status { - DbCompletionStatus::Unprocessed => CompletionStatus::Unprocessed, - DbCompletionStatus::Accepted => CompletionStatus::Accepted, - DbCompletionStatus::Rejected => CompletionStatus::Rejected, - DbCompletionStatus::Exception => CompletionStatus::Exception, - DbCompletionStatus::MachineHalted => { - CompletionStatus::MachineHalted - } - DbCompletionStatus::CycleLimitExceeded => { - CompletionStatus::CycleLimitExceeded - } - DbCompletionStatus::TimeLimitExceeded => { - CompletionStatus::TimeLimitExceeded - } - DbCompletionStatus::PayloadLengthLimitExceeded => { - CompletionStatus::PayloadLengthLimitExceeded - } - } - } -} - -#[graphql_object( - context = Context, - Scalar = RollupsGraphQLScalarValue, - description = "Request submitted to the application to advance its state" -)] -impl Input { - #[graphql(description = "Input index starting from genesis")] - fn index(&self) -> i32 { - self.index - } - - #[graphql(description = "Status of the input")] - fn status(&self) -> CompletionStatus { - self.status.into() - } - - #[graphql(description = "Address responsible for submitting the input")] - fn msg_sender(&self) -> String { - hex_encode(&self.msg_sender) - } - - #[graphql( - description = "Timestamp associated with the input submission, as defined by the base layer's block in which it was recorded" - )] - fn timestamp(&self) -> i64 { - match self.timestamp.duration_since(UNIX_EPOCH) { - Ok(duration) => duration.as_secs() as i64, - Err(e) => { - tracing::warn!("failed to parse timestamp ({})", e); - 0 - } - } - } - - #[graphql( - description = "Number of the base layer block in which the input was recorded" - )] - fn block_number(&self) -> i64 { - self.block_number - } - - #[graphql( - description = "Input payload in Ethereum hex binary format, starting with '0x'" - )] - fn payload(&self) -> String { - hex_encode(&self.payload) - } - - #[graphql( - description = "Get voucher from this particular input given the voucher's index" - )] - fn voucher( - &self, - #[graphql(description = "Voucher index in input")] index: i32, - ) -> FieldResult { - executor - .context() - .repository - .get_voucher(index, self.index) - .map_err(convert_error) - } - - #[graphql( - description = "Get notice from this particular input given the notice's index" - )] - fn notice( - &self, - #[graphql(description = "Notice index in 
input")] index: i32, - ) -> FieldResult { - executor - .context() - .repository - .get_notice(index, self.index) - .map_err(convert_error) - } - - #[graphql( - description = "Get report from this particular input given the report's index" - )] - fn report( - &self, - #[graphql(description = "Report index in input")] index: i32, - ) -> FieldResult { - executor - .context() - .repository - .get_report(index, self.index) - .map_err(convert_error) - } - - #[graphql( - description = "Get vouchers from this particular input with support for pagination" - )] - fn vouchers( - &self, - #[graphql( - description = "Get at most the first `n` entries (forward pagination)" - )] - first: Option, - #[graphql( - description = "Get at most the last `n` entries (backward pagination)" - )] - last: Option, - #[graphql( - description = "Get entries that come after the provided cursor (forward pagination)" - )] - after: Option, - #[graphql( - description = "Get entries that come before the provided cursor (backward pagination)" - )] - before: Option, - ) -> FieldResult> { - let filter = VoucherQueryFilter { - input_index: Some(self.index), - }; - executor - .context() - .repository - .get_vouchers(first, last, after, before, filter) - .map_err(convert_error) - } - - #[graphql( - description = "Get notices from this particular input with support for pagination" - )] - fn notices( - &self, - #[graphql( - description = "Get at most the first `n` entries (forward pagination)" - )] - first: Option, - #[graphql( - description = "Get at most the last `n` entries (backward pagination)" - )] - last: Option, - #[graphql( - description = "Get entries that come after the provided cursor (forward pagination)" - )] - after: Option, - #[graphql( - description = "Get entries that come before the provided cursor (backward pagination)" - )] - before: Option, - ) -> FieldResult> { - let filter = NoticeQueryFilter { - input_index: Some(self.index), - }; - executor - .context() - .repository - .get_notices(first, last, after, before, filter) - .map_err(convert_error) - } - - #[graphql( - description = "Get reports from this particular input with support for pagination" - )] - fn reports( - &self, - #[graphql( - description = "Get at most the first `n` entries (forward pagination)" - )] - first: Option, - #[graphql( - description = "Get at most the last `n` entries (backward pagination)" - )] - last: Option, - #[graphql( - description = "Get entries that come after the provided cursor (forward pagination)" - )] - after: Option, - #[graphql( - description = "Get entries that come before the provided cursor (backward pagination)" - )] - before: Option, - ) -> FieldResult> { - let filter = ReportQueryFilter { - input_index: Some(self.index), - }; - executor - .context() - .repository - .get_reports(first, last, after, before, filter) - .map_err(convert_error) - } -} - -#[graphql_object( - context = Context, - Scalar = RollupsGraphQLScalarValue, - description = "Representation of a transaction that can be carried out on the base layer blockchain, such as a transfer of assets" -)] -impl Voucher { - #[graphql( - description = "Voucher index within the context of the input that produced it" - )] - fn index(&self) -> i32 { - self.index - } - - #[graphql(description = "Input whose processing produced the voucher")] - fn input(&self) -> FieldResult { - executor - .context() - .repository - .get_input(self.input_index) - .map_err(convert_error) - } - - #[graphql( - description = "Transaction destination address in Ethereum hex binary format (20 
bytes), starting with '0x'" - )] - fn destination(&self) -> String { - hex_encode(&self.destination) - } - - #[graphql( - description = "Transaction payload in Ethereum hex binary format, starting with '0x'" - )] - fn payload(&self) -> String { - hex_encode(&self.payload) - } - - #[graphql( - description = "Proof object that allows this voucher to be validated and executed on the base layer blockchain" - )] - fn proof(&self) -> FieldResult> { - executor - .context() - .repository - .get_proof(self.input_index, self.index, OutputEnum::Voucher) - .map_err(convert_error) - } -} - -#[graphql_object( - context = Context, - Scalar = RollupsGraphQLScalarValue, - description = "Informational statement that can be validated in the base layer blockchain" -)] -impl Notice { - #[graphql( - description = "Notice index within the context of the input that produced it" - )] - fn index(&self) -> i32 { - self.index - } - - #[graphql(description = "Input whose processing produced the notice")] - fn input(&self) -> FieldResult { - executor - .context() - .repository - .get_input(self.input_index) - .map_err(convert_error) - } - - #[graphql( - description = "Notice data as a payload in Ethereum hex binary format, starting with '0x'" - )] - fn payload(&self) -> String { - hex_encode(&self.payload) - } - - #[graphql( - description = "Proof object that allows this notice to be validated by the base layer blockchain" - )] - fn proof(&self) -> FieldResult> { - executor - .context() - .repository - .get_proof(self.input_index, self.index, OutputEnum::Notice) - .map_err(convert_error) - } -} - -#[graphql_object( - context = Context, - Scalar = RollupsGraphQLScalarValue, - description = "Application log or diagnostic information" -)] -impl Report { - #[graphql( - description = "Report index within the context of the input that produced it" - )] - fn index(&self) -> i32 { - self.index - } - - #[graphql(description = "Input whose processing produced the report")] - fn input(&self) -> FieldResult { - executor - .context() - .repository - .get_input(self.input_index) - .map_err(convert_error) - } - - #[graphql( - description = "Report data as a payload in Ethereum hex binary format, starting with '0x'" - )] - fn payload(&self) -> String { - hex_encode(&self.payload) - } -} - -#[graphql_object( - context = Context, - Scalar = RollupsGraphQLScalarValue, - description = "Data that can be used as proof to validate notices and execute vouchers on the base layer blockchain" -)] -impl Proof { - #[graphql(description = "Validity proof for an output")] - fn validity(&self) -> OutputValidityProof { - OutputValidityProof { - input_index_within_epoch: self.validity_input_index_within_epoch, - output_index_within_input: self.validity_output_index_within_input, - output_hashes_root_hash: hex_encode( - &self.validity_output_hashes_root_hash, - ), - vouchers_epoch_root_hash: hex_encode( - &self.validity_vouchers_epoch_root_hash, - ), - notices_epoch_root_hash: hex_encode( - &self.validity_notices_epoch_root_hash, - ), - machine_state_hash: hex_encode(&self.validity_machine_state_hash), - output_hash_in_output_hashes_siblings: self - .validity_output_hash_in_output_hashes_siblings - .iter() - .map(|hash| hex_encode(hash.as_ref().unwrap_or(&vec![]))) - .collect(), - output_hashes_in_epoch_siblings: self - .validity_output_hashes_in_epoch_siblings - .iter() - .map(|hash| hex_encode(hash.as_ref().unwrap_or(&vec![]))) - .collect(), - } - } - - #[graphql( - description = "Data that allows the validity proof to be contextualized within 
-
-#[derive(GraphQLObject, Debug, Clone)]
-#[graphql(
-    description = "Validity proof for an output",
-    scalar = RollupsGraphQLScalarValue,
-)]
-struct OutputValidityProof {
-    #[graphql(
-        description = "Local input index within the context of the related epoch"
-    )]
-    pub input_index_within_epoch: i32,
-
-    #[graphql(
-        description = "Output index within the context of the input that produced it"
-    )]
-    pub output_index_within_input: i32,
-
-    #[graphql(
-        description = "Merkle root of all output hashes of the related input, given in Ethereum hex binary format (32 bytes), starting with '0x'"
-    )]
-    pub output_hashes_root_hash: String,
-
-    #[graphql(
-        description = "Merkle root of all voucher hashes of the related epoch, given in Ethereum hex binary format (32 bytes), starting with '0x'"
-    )]
-    pub vouchers_epoch_root_hash: String,
-
-    #[graphql(
-        description = "Merkle root of all notice hashes of the related epoch, given in Ethereum hex binary format (32 bytes), starting with '0x'"
-    )]
-    pub notices_epoch_root_hash: String,
-
-    #[graphql(
-        description = "Hash of the machine state claimed for the related epoch, given in Ethereum hex binary format (32 bytes), starting with '0x'"
-    )]
-    pub machine_state_hash: String,
-
-    #[graphql(
-        description = "Proof that this output hash is in the output-hashes merkle tree. This array of siblings is bottom-up ordered (from the leaf to the root). Each hash is given in Ethereum hex binary format (32 bytes), starting with '0x'."
-    )]
-    pub output_hash_in_output_hashes_siblings: Vec<String>,
-
-    #[graphql(
-        description = "Proof that this output-hashes root hash is in epoch's output merkle tree. This array of siblings is bottom-up ordered (from the leaf to the root). Each hash is given in Ethereum hex binary format (32 bytes), starting with '0x'."
-    )]
-    pub output_hashes_in_epoch_siblings: Vec<String>,
-}
-
-#[derive(Debug, Clone, GraphQLInputObject)]
-#[graphql(scalar = RollupsGraphQLScalarValue)]
-/// Filter object to restrict results depending on input properties
-pub struct InputFilter {
-    /// Filter only inputs with index lower than a given value
-    pub index_lower_than: Option<i32>,
-
-    /// Filter only inputs with index greater than a given value
-    pub index_greater_than: Option<i32>,
-}
-
-impl From<InputFilter> for InputQueryFilter {
-    fn from(filter: InputFilter) -> InputQueryFilter {
-        InputQueryFilter {
-            index_lower_than: filter.index_lower_than,
-            index_greater_than: filter.index_greater_than,
-        }
-    }
-}
-
-#[derive(Debug, Clone, GraphQLObject)]
-/// Page metadata for the cursor-based Connection pagination pattern
-struct PageInfo {
-    /// Cursor pointing to the first entry of the page
-    start_cursor: Option<String>,
-
-    /// Cursor pointing to the last entry of the page
-    end_cursor: Option<String>,
-
-    /// Indicates if there are additional entries after the end cursor
-    has_next_page: bool,
-
-    /// Indicates if there are additional entries before the start cursor
-    has_previous_page: bool,
-}
-
-impl From<&DbPageInfo> for PageInfo {
-    fn from(page_info: &DbPageInfo) -> PageInfo {
-        PageInfo {
-            start_cursor: page_info
-                .start_cursor
-                .as_ref()
-                .map(|cursor| cursor.encode()),
-            end_cursor: page_info
-                .end_cursor
-                .as_ref()
-                .map(|cursor| cursor.encode()),
-            has_next_page: page_info.has_next_page,
-            has_previous_page: page_info.has_previous_page,
-        }
-    }
-}
-
-/// Implement the Connection and Edge objects
-macro_rules! impl_connection {
-    ($connection_name: literal, $edge_name: literal, $node: ty) => {
-        #[graphql_object(
-            name = $connection_name,
-            context = Context,
-            Scalar = RollupsGraphQLScalarValue,
-            description = "Pagination result"
-        )]
-        impl Connection<$node> {
-            #[graphql(
-                description = "Total number of entries that match the query"
-            )]
-            fn total_count(&self) -> i32 {
-                self.total_count
-            }
-
-            #[graphql(
-                description = "Pagination entries returned for the current page"
-            )]
-            fn edges(&self) -> &Vec<Edge<$node>> {
-                &self.edges
-            }
-
-            #[graphql(description = "Pagination metadata")]
-            fn page_info(&self) -> PageInfo {
-                (&self.page_info).into()
-            }
-        }
-
-        #[graphql_object(
-            name = $edge_name,
-            context = Context,
-            Scalar = RollupsGraphQLScalarValue,
-            description = "Pagination entry"
-        )]
-        impl Edge<$node> {
-            #[graphql(description = "Node instance")]
-            fn node(&self) -> &$node {
-                &self.node
-            }
-
-            #[graphql(description = "Pagination cursor")]
-            fn cursor(&self) -> String {
-                self.cursor.encode()
-            }
-        }
-    };
-}
-
-impl_connection!("InputConnection", "InputEdge", Input);
-impl_connection!("VoucherConnection", "VoucherEdge", Voucher);
-impl_connection!("NoticeConnection", "NoticeEdge", Notice);
-impl_connection!("ReportConnection", "ReportEdge", Report);
-
-fn convert_error(e: rollups_data::Error) -> FieldError {
-    tracing::warn!("Got error during query: {:?}", e);
-    e.into()
-}
-
-pub fn hex_encode(data: &[u8]) -> String {
-    format!("0x{}", hex::encode(data))
-}
diff --git a/offchain/graphql-server/src/schema/scalar.rs b/offchain/graphql-server/src/schema/scalar.rs
deleted file mode 100644
index f2d2da2dc..000000000
--- a/offchain/graphql-server/src/schema/scalar.rs
+++ /dev/null
@@ -1,170 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-//
-// Parts of the code (the BigInt scalar implementation) are licensed
-// under BSD 2-Clause Copyright (c) 2016, Magnus Hallin
-
-use juniper::parser::{ParseError, ScalarToken, Token};
-use juniper::{
-    graphql_scalar, GraphQLScalarValue, ParseScalarResult, ScalarValue, Value,
-};
-
-/// Custom GraphQL scalar definition, to be able to use long (signed 64) values
-#[derive(Debug, Clone, PartialEq, GraphQLScalarValue)]
-pub enum RollupsGraphQLScalarValue {
-    Int(i32),
-    BigInt(i64),
-    Float(f64),
-    String(String),
-    Boolean(bool),
-}
-
-#[graphql_scalar(name = "BigInt")]
-impl GraphQLScalar for i64 {
-    fn resolve(&self) -> Value {
-        // Convert to string because some clients can't handle 64-bit integers
-        Value::scalar(self.to_string())
-    }
-
-    fn from_input_value(v: &juniper::InputValue) -> Option<i64> {
-        v.as_scalar_value::<i64>().copied()
-    }
-
-    fn from_str<'a>(
-        value: ScalarToken<'a>,
-    ) -> ParseScalarResult<'a, RollupsGraphQLScalarValue> {
-        if let ScalarToken::Int(v) = value {
-            v.parse()
-                .map_err(|_| ParseError::UnexpectedToken(Token::Scalar(value)))
-                .map(|s: i64| s.into())
-        } else {
-            Err(ParseError::UnexpectedToken(Token::Scalar(value)))
-        }
-    }
-}
-
-impl ScalarValue for RollupsGraphQLScalarValue {
-    type Visitor = RollupsGraphQLScalarValueVisitor;
-
-    fn as_int(&self) -> Option<i32> {
-        match *self {
-            Self::Int(ref i) => Some(*i),
-            _ => None,
-        }
-    }
-
-    fn as_string(&self) -> Option<String> {
-        match *self {
-            Self::String(ref s) => Some(s.clone()),
-            _ => None,
-        }
-    }
-
-    fn into_string(self) -> Option<String> {
-        match self {
-            Self::String(s) => Some(s),
-            _ => None,
-        }
-    }
-
-    fn as_str(&self) -> Option<&str> {
-        match *self {
-            Self::String(ref s) => Some(s.as_str()),
-            _ => None,
-        }
-    }
-
-    fn as_float(&self) -> Option<f64> {
-        match *self {
-            Self::Int(ref i) => Some(*i as f64),
-            Self::Float(ref f) => Some(*f),
-            _ => None,
-        }
-    }
-
-    fn as_boolean(&self) -> Option<bool> {
-        match *self {
-            Self::Boolean(ref b) => Some(*b),
-            _ => None,
-        }
-    }
-}
-
-#[derive(Default)]
-pub struct RollupsGraphQLScalarValueVisitor;
-
-impl<'de> serde::de::Visitor<'de> for RollupsGraphQLScalarValueVisitor {
-    type Value = RollupsGraphQLScalarValue;
-
-    fn expecting(
-        &self,
-        formatter: &mut std::fmt::Formatter,
-    ) -> std::fmt::Result {
-        formatter.write_str("a valid input value")
-    }
-
-    fn visit_bool<E>(
-        self,
-        value: bool,
-    ) -> Result<RollupsGraphQLScalarValue, E> {
-        Ok(RollupsGraphQLScalarValue::Boolean(value))
-    }
-
-    fn visit_i32<E>(self, value: i32) -> Result<RollupsGraphQLScalarValue, E>
-    where
-        E: serde::de::Error,
-    {
-        Ok(RollupsGraphQLScalarValue::Int(value))
-    }
-
-    fn visit_i64<E>(self, value: i64) -> Result<RollupsGraphQLScalarValue, E>
-    where
-        E: serde::de::Error,
-    {
-        if value <= i32::max_value() as i64 {
-            self.visit_i32(value as i32)
-        } else {
-            Ok(RollupsGraphQLScalarValue::BigInt(value))
-        }
-    }
-
-    fn visit_u32<E>(self, value: u32) -> Result<RollupsGraphQLScalarValue, E>
-    where
-        E: serde::de::Error,
-    {
-        if value <= i32::max_value() as u32 {
-            self.visit_i32(value as i32)
-        } else {
-            self.visit_u64(value as u64)
-        }
-    }
-
-    fn visit_u64<E>(self, value: u64) -> Result<RollupsGraphQLScalarValue, E>
-    where
-        E: serde::de::Error,
-    {
-        if value <= i64::MAX as u64 {
-            self.visit_i64(value as i64)
-        } else {
-            Ok(RollupsGraphQLScalarValue::Float(value as f64))
-        }
-    }
-
-    fn visit_f64<E>(self, value: f64) -> Result<RollupsGraphQLScalarValue, E> {
-        Ok(RollupsGraphQLScalarValue::Float(value))
-    }
-
-    fn visit_str<E>(self, value: &str) -> Result<RollupsGraphQLScalarValue, E>
-    where
-        E: serde::de::Error,
-    {
-        self.visit_string(value.into())
-    }
-
-    fn visit_string<E>(
-        self,
-        value: String,
-    ) -> Result<RollupsGraphQLScalarValue, E> {
-        Ok(RollupsGraphQLScalarValue::String(value))
-    }
-}
diff --git a/offchain/graphql-server/tests/integration.rs b/offchain/graphql-server/tests/integration.rs
deleted file mode 100644
index e1d6ca6be..000000000
--- a/offchain/graphql-server/tests/integration.rs
+++ /dev/null
@@ -1,595 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use actix_web::dev::ServerHandle;
-use actix_web::rt::spawn;
-use awc::{Client, ClientRequest};
-use graphql_server::{http, schema::Context};
-use rollups_data::{
-    CompletionStatus, Input, Notice, Proof, Report, Repository, Voucher,
-};
-use std::fs::read_to_string;
-use std::str::from_utf8;
-use std::time::{Duration, UNIX_EPOCH};
-use test_fixtures::RepositoryFixture;
-use testcontainers::clients::Cli;
-use tokio::sync::oneshot;
-use tokio::task::JoinHandle;
-
-const QUERY_PATH: &str = "tests/queries/";
-const RESPONSE_PATH: &str = "tests/responses/";
-const HOST: &str = "127.0.0.1";
-const PORT: u16 = 4003;
-
-struct TestState<'d> {
-    repository: RepositoryFixture<'d>,
-    server: GraphQLServerWrapper,
-}
-
-impl TestState<'_> {
-    async fn setup(docker: &Cli) -> TestState<'_> {
-        let repository = RepositoryFixture::setup(docker);
-        let server =
-            GraphQLServerWrapper::spawn_server(repository.repository().clone())
-                .await;
-        TestState { repository, server }
-    }
-
-    async fn populate_database(&self) {
-        let input = Input {
-            index: 0,
-            msg_sender: "msg-sender".as_bytes().to_vec(),
-            tx_hash: "tx-hash".as_bytes().to_vec(),
-            block_number: 0,
-            timestamp: UNIX_EPOCH + Duration::from_secs(1676489717),
-            payload: "input-0".as_bytes().to_vec(),
-            status: CompletionStatus::Accepted,
-        };
-
-        let notice = Notice {
-            input_index: 0,
-            index: 0,
-            payload: "notice-0-0".as_bytes().to_vec(),
-        };
-
-        let
voucher = Voucher { - input_index: 0, - index: 0, - destination: "destination".as_bytes().to_vec(), - payload: "voucher-0-0".as_bytes().to_vec(), - }; - - let report = Report { - input_index: 0, - index: 0, - payload: "report-0-0".as_bytes().to_vec(), - }; - - let proof_voucher = Proof { - input_index: 0, - output_index: 0, - output_enum: rollups_data::OutputEnum::Voucher, - validity_input_index_within_epoch: 0, - validity_output_index_within_input: 0, - validity_output_hashes_root_hash: "".as_bytes().to_vec(), - validity_vouchers_epoch_root_hash: "".as_bytes().to_vec(), - validity_notices_epoch_root_hash: "".as_bytes().to_vec(), - validity_machine_state_hash: "".as_bytes().to_vec(), - validity_output_hash_in_output_hashes_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - validity_output_hashes_in_epoch_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - context: "".as_bytes().to_vec(), - }; - - let proof_notice = Proof { - input_index: 0, - output_index: 0, - output_enum: rollups_data::OutputEnum::Notice, - validity_input_index_within_epoch: 0, - validity_output_index_within_input: 0, - validity_output_hashes_root_hash: "".as_bytes().to_vec(), - validity_vouchers_epoch_root_hash: "" - .as_bytes() - .to_vec(), - validity_notices_epoch_root_hash: "".as_bytes().to_vec(), - validity_machine_state_hash: "".as_bytes().to_vec(), - validity_output_hash_in_output_hashes_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - validity_output_hashes_in_epoch_siblings: vec![Some( - "".as_bytes().to_vec(), - )], - context: "".as_bytes().to_vec(), - }; - - let repo = self.repository.repository(); - - repo.insert_input(input.clone()) - .expect("Failed to insert input"); - - repo.insert_notice(notice.clone()) - .expect("Failed to insert notice"); - - repo.insert_voucher(voucher.clone()) - .expect("Failed to insert voucher"); - - repo.insert_report(report.clone()) - .expect("Failed to insert report"); - - repo.insert_proof(proof_notice.clone()) - .expect("Failed to insert notice type proof"); - - repo.insert_proof(proof_voucher.clone()) - .expect("Failed to insert voucher type proof"); - } - - async fn populate_for_pagination(&self) { - let input = Input { - index: 0, - msg_sender: "msg-sender".as_bytes().to_vec(), - tx_hash: "tx-hash".as_bytes().to_vec(), - block_number: 0, - timestamp: UNIX_EPOCH + Duration::from_secs(1676489717), - payload: "input-0".as_bytes().to_vec(), - status: CompletionStatus::Accepted, - }; - - let notice0 = Notice { - input_index: 0, - index: 0, - payload: "notice-0-0".as_bytes().to_vec(), - }; - - let notice1 = Notice { - input_index: 0, - index: 1, - payload: "notice-0-1".as_bytes().to_vec(), - }; - - let notice2 = Notice { - input_index: 0, - index: 2, - payload: "notice-0-2".as_bytes().to_vec(), - }; - - let notice3 = Notice { - input_index: 0, - index: 3, - payload: "notice-0-3".as_bytes().to_vec(), - }; - - let notice4 = Notice { - input_index: 0, - index: 4, - payload: "notice-0-4".as_bytes().to_vec(), - }; - - let repo = self.repository.repository(); - - repo.insert_input(input.clone()) - .expect("Failed to insert input"); - - repo.insert_notice(notice0.clone()) - .expect("Failed to insert notice"); - - repo.insert_notice(notice1.clone()) - .expect("Failed to insert notice"); - - repo.insert_notice(notice2.clone()) - .expect("Failed to insert notice"); - - repo.insert_notice(notice3.clone()) - .expect("Failed to insert notice"); - - repo.insert_notice(notice4.clone()) - .expect("Failed to insert notice"); - } -} - -pub struct GraphQLServerWrapper { - 
-    server_handle: ServerHandle,
-    join_handle: JoinHandle<Result<(), std::io::Error>>,
-}
-
-impl GraphQLServerWrapper {
-    async fn spawn_server(repository: Repository) -> Self {
-        let context = Context::new(repository);
-        let (tx, rx) = oneshot::channel();
-
-        let join_handle = spawn(
-            async {
-                let service_handler = http::start_service(HOST, PORT, context)
-                    .expect("failed to create server");
-                tx.send(service_handler.handle())
-                    .expect("failed to send server handle");
-                service_handler
-            }
-            .await,
-        );
-        let server_handle = rx.await.expect("failed to receive server handle");
-        Self {
-            server_handle,
-            join_handle,
-        }
-    }
-
-    pub async fn stop(self) {
-        self.server_handle.stop(true).await;
-        self.join_handle
-            .await
-            .expect("failed to stop graphql server")
-            .expect("failed to stop graphql server");
-    }
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn get_graphql() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-
-    let req = create_get_request("graphql");
-    let res = req.send().await.expect("Should get from graphql");
-    test.server.stop().await;
-
-    assert_eq!(res.status(), awc::http::StatusCode::from_u16(200).unwrap());
-    assert_eq!(res.headers().get("content-length").unwrap(), "19050");
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn query_notice() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-    test.populate_database().await;
-
-    let body = post_query_request("notice.json").await;
-    assert_from_body(body, "notice.json");
-    test.server.stop().await;
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn query_notice_with_input() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-    test.populate_database().await;
-
-    let body = post_query_request("notice_with_input.json").await;
-    assert_from_body(body, "notice_with_input.json");
-    test.server.stop().await;
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn query_notice_with_proof() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-    test.populate_database().await;
-
-    let body = post_query_request("notice_with_proof.json").await;
-    assert_from_body(body, "notice_with_proof.json");
-    test.server.stop().await;
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn query_proof_from_notice() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-    test.populate_database().await;
-
-    let body = post_query_request("proof_from_notice.json").await;
-    assert_from_body(body, "proof_from_notice.json");
-    test.server.stop().await;
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn query_notices() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-    test.populate_database().await;
-
-    let body = post_query_request("notices.json").await;
-    assert_from_body(body, "notices.json");
-    test.server.stop().await;
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn query_voucher() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-    test.populate_database().await;
-
-    let body = post_query_request("voucher.json").await;
-    assert_from_body(body, "voucher.json");
-    test.server.stop().await;
-}
-
-#[actix_web::test]
-#[serial_test::serial]
-async fn query_voucher_with_input() {
-    let docker = Cli::default();
-    let test = TestState::setup(&docker).await;
-    test.populate_database().await;
-
-    let body = post_query_request("voucher_with_input.json").await;
-    assert_from_body(body, "voucher_with_input.json");
test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_voucher_with_proof() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("voucher_with_proof.json").await; - assert_from_body(body, "voucher_with_proof.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_proof_from_voucher() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("proof_from_voucher.json").await; - assert_from_body(body, "proof_from_voucher.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_vouchers() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("vouchers.json").await; - assert_from_body(body, "vouchers.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_report() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("report.json").await; - assert_from_body(body, "report.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_report_with_input() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("report_with_input.json").await; - assert_from_body(body, "report_with_input.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_reports() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("reports.json").await; - assert_from_body(body, "reports.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_with_variables() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("variables.json").await; - assert_from_body(body, "variables.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_input() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("input.json").await; - assert_from_body(body, "input.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_input_with_voucher() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("input_with_voucher.json").await; - assert_from_body(body, "input_with_voucher.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_input_with_vouchers() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("input_with_vouchers.json").await; - assert_from_body(body, "input_with_vouchers.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_input_with_notice() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let 
body = post_query_request("input_with_notice.json").await; - assert_from_body(body, "input_with_notice.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_input_with_notices() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("input_with_notices.json").await; - assert_from_body(body, "input_with_notices.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_input_with_report() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("input_with_report.json").await; - assert_from_body(body, "input_with_report.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_input_with_reports() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("input_with_reports.json").await; - assert_from_body(body, "input_with_reports.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_inputs() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("inputs.json").await; - assert_from_body(body, "inputs.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_next_page() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_for_pagination().await; - - let body = post_query_request("next_page.json").await; - assert_from_body(body, "next_page.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_previous_page() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_for_pagination().await; - - let body = post_query_request("previous_page.json").await; - assert_from_body(body, "previous_page.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_error_missing_argument() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("error_missing_argument.json").await; - assert_from_body(body, "error_missing_argument.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_error_not_found() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("error_not_found.json").await; - assert_from_body(body, "error_not_found.json"); - test.server.stop().await; -} - -#[actix_web::test] -#[serial_test::serial] -async fn query_error_unknown_field() { - let docker = Cli::default(); - let test = TestState::setup(&docker).await; - test.populate_database().await; - - let body = post_query_request("error_unknown_field.json").await; - assert_from_body(body, "error_unknown_field.json"); - test.server.stop().await; -} - -fn create_get_request(endpoint: &str) -> ClientRequest { - let client = Client::default(); - - client - .get(format!("http://localhost:{}/{}", PORT, endpoint)) - .insert_header(("Content-type", "text/html; charset=utf-8")) -} - -async fn post_query_request(query_file: &str) -> actix_web::web::Bytes { - let query = String::from(QUERY_PATH) 
+ query_file; - let client = Client::builder().timeout(Duration::from_secs(5)).finish(); - let mut response = client - .post(format!("http://localhost:{}/graphql", PORT)) - .insert_header(("Content-type", "application/json")) - .send_body(read_to_string(query).expect("Should read request file")) - .await - .expect("Should query server"); - - let body = response.body().await.expect("Should be body"); - - body -} - -fn assert_from_body(body: actix_web::web::Bytes, res_file: &str) { - let response = String::from(RESPONSE_PATH) + res_file; - assert_eq!( - from_utf8(&body).expect("Should contain response body"), - read_to_string(response).expect("Should read response file") - ); -} diff --git a/offchain/graphql-server/tests/queries/error_missing_argument.json b/offchain/graphql-server/tests/queries/error_missing_argument.json deleted file mode 100644 index 061bbf0e4..000000000 --- a/offchain/graphql-server/tests/queries/error_missing_argument.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notice(noticeIndex: 0){index, payload}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/error_not_found.json b/offchain/graphql-server/tests/queries/error_not_found.json deleted file mode 100644 index f717ba3be..000000000 --- a/offchain/graphql-server/tests/queries/error_not_found.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notice(noticeIndex: 1, inputIndex: 0){index, payload}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/error_unknown_field.json b/offchain/graphql-server/tests/queries/error_unknown_field.json deleted file mode 100644 index 23f5ab6c8..000000000 --- a/offchain/graphql-server/tests/queries/error_unknown_field.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notice(noticeIndex: 0, inputIndex: 0){test}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/input.json b/offchain/graphql-server/tests/queries/input.json deleted file mode 100644 index 49ed65209..000000000 --- a/offchain/graphql-server/tests/queries/input.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{input(index: 0){index, msgSender, timestamp, blockNumber, payload, status}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/input_with_notice.json b/offchain/graphql-server/tests/queries/input_with_notice.json deleted file mode 100644 index b51f749ca..000000000 --- a/offchain/graphql-server/tests/queries/input_with_notice.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{input(index: 0){index, notice(index: 0){index}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/input_with_notices.json b/offchain/graphql-server/tests/queries/input_with_notices.json deleted file mode 100644 index 902f6900f..000000000 --- a/offchain/graphql-server/tests/queries/input_with_notices.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{input(index: 0){index, notices(first: 10){totalCount, edges {node {index}}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/input_with_report.json b/offchain/graphql-server/tests/queries/input_with_report.json deleted file mode 100644 index acd35341b..000000000 --- a/offchain/graphql-server/tests/queries/input_with_report.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{input(index: 0){index, report(index: 0){index}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/input_with_reports.json b/offchain/graphql-server/tests/queries/input_with_reports.json deleted file mode 
100644 index be750c6fa..000000000 --- a/offchain/graphql-server/tests/queries/input_with_reports.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{input(index: 0){index, reports(first: 10){totalCount, edges {node {index}}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/input_with_voucher.json b/offchain/graphql-server/tests/queries/input_with_voucher.json deleted file mode 100644 index 5bfaf5327..000000000 --- a/offchain/graphql-server/tests/queries/input_with_voucher.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{input(index: 0){index, voucher(index: 0){index}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/input_with_vouchers.json b/offchain/graphql-server/tests/queries/input_with_vouchers.json deleted file mode 100644 index 24803ed7c..000000000 --- a/offchain/graphql-server/tests/queries/input_with_vouchers.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{input(index: 0){index, vouchers(first: 10){totalCount, edges {node {index}}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/inputs.json b/offchain/graphql-server/tests/queries/inputs.json deleted file mode 100644 index c5f3da755..000000000 --- a/offchain/graphql-server/tests/queries/inputs.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{inputs(first: 10, where: {indexLowerThan: 5}){totalCount, edges {node {index, msgSender, timestamp, blockNumber, payload}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/next_page.json b/offchain/graphql-server/tests/queries/next_page.json deleted file mode 100644 index 18ad92fc2..000000000 --- a/offchain/graphql-server/tests/queries/next_page.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notices(first: 2, after: \"Mg==\"){totalCount, edges {node {index, payload}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/notice.json b/offchain/graphql-server/tests/queries/notice.json deleted file mode 100644 index 97361cabb..000000000 --- a/offchain/graphql-server/tests/queries/notice.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notice(noticeIndex: 0, inputIndex: 0){index, payload}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/notice_with_input.json b/offchain/graphql-server/tests/queries/notice_with_input.json deleted file mode 100644 index 5ef1aa4ea..000000000 --- a/offchain/graphql-server/tests/queries/notice_with_input.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notice(noticeIndex: 0, inputIndex: 0){index, input {index}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/notice_with_proof.json b/offchain/graphql-server/tests/queries/notice_with_proof.json deleted file mode 100644 index 97aa962ff..000000000 --- a/offchain/graphql-server/tests/queries/notice_with_proof.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notice(noticeIndex: 0, inputIndex: 0){index, proof {context}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/notices.json b/offchain/graphql-server/tests/queries/notices.json deleted file mode 100644 index c14b1f6f0..000000000 --- a/offchain/graphql-server/tests/queries/notices.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notices(first: 10){totalCount, edges {node {index, payload}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/previous_page.json b/offchain/graphql-server/tests/queries/previous_page.json deleted file mode 100644 index 
4ff79080c..000000000 --- a/offchain/graphql-server/tests/queries/previous_page.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notices(last: 2, before: \"Mg==\"){totalCount, edges {node {index, payload}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/proof_from_notice.json b/offchain/graphql-server/tests/queries/proof_from_notice.json deleted file mode 100644 index 9579a89f5..000000000 --- a/offchain/graphql-server/tests/queries/proof_from_notice.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{notice(noticeIndex: 0, inputIndex: 0){proof{validity{inputIndexWithinEpoch, outputIndexWithinInput, outputHashesRootHash, vouchersEpochRootHash, noticesEpochRootHash, machineStateHash, outputHashInOutputHashesSiblings, outputHashesInEpochSiblings}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/proof_from_voucher.json b/offchain/graphql-server/tests/queries/proof_from_voucher.json deleted file mode 100644 index 280b09ca6..000000000 --- a/offchain/graphql-server/tests/queries/proof_from_voucher.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{voucher(voucherIndex: 0, inputIndex: 0){proof{validity{inputIndexWithinEpoch, outputIndexWithinInput, outputHashesRootHash, vouchersEpochRootHash, noticesEpochRootHash, machineStateHash, outputHashInOutputHashesSiblings, outputHashesInEpochSiblings}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/report.json b/offchain/graphql-server/tests/queries/report.json deleted file mode 100644 index e359e904e..000000000 --- a/offchain/graphql-server/tests/queries/report.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{report(reportIndex: 0, inputIndex: 0){index, payload}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/report_with_input.json b/offchain/graphql-server/tests/queries/report_with_input.json deleted file mode 100644 index 79e67e3b6..000000000 --- a/offchain/graphql-server/tests/queries/report_with_input.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{report(reportIndex: 0, inputIndex: 0){index, input {index}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/reports.json b/offchain/graphql-server/tests/queries/reports.json deleted file mode 100644 index ea3701be2..000000000 --- a/offchain/graphql-server/tests/queries/reports.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{reports(first: 10){totalCount, edges {node {index, payload}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/variables.json b/offchain/graphql-server/tests/queries/variables.json deleted file mode 100644 index 05bff708b..000000000 --- a/offchain/graphql-server/tests/queries/variables.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "query": "query getInput($inputIndex: Int!) 
{ input(index: $inputIndex) { blockNumber } }", - "variables": { - "inputIndex": 0 - } -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/voucher.json b/offchain/graphql-server/tests/queries/voucher.json deleted file mode 100644 index a41494655..000000000 --- a/offchain/graphql-server/tests/queries/voucher.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{voucher(voucherIndex: 0, inputIndex: 0){index, destination, payload}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/voucher_with_input.json b/offchain/graphql-server/tests/queries/voucher_with_input.json deleted file mode 100644 index 7ac04854f..000000000 --- a/offchain/graphql-server/tests/queries/voucher_with_input.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{voucher(voucherIndex: 0, inputIndex: 0){index, input {index}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/voucher_with_proof.json b/offchain/graphql-server/tests/queries/voucher_with_proof.json deleted file mode 100644 index 3fccdbfaf..000000000 --- a/offchain/graphql-server/tests/queries/voucher_with_proof.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{voucher(voucherIndex: 0, inputIndex: 0){index, proof {context}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/queries/vouchers.json b/offchain/graphql-server/tests/queries/vouchers.json deleted file mode 100644 index 626ae4cca..000000000 --- a/offchain/graphql-server/tests/queries/vouchers.json +++ /dev/null @@ -1,3 +0,0 @@ -{ - "query": "{vouchers(first: 10){totalCount, edges {node {index, destination, payload}}}}" -} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/error_missing_argument.json b/offchain/graphql-server/tests/responses/error_missing_argument.json deleted file mode 100644 index a2da7b9d6..000000000 --- a/offchain/graphql-server/tests/responses/error_missing_argument.json +++ /dev/null @@ -1 +0,0 @@ -{"errors":[{"message":"Field \"notice\" argument \"inputIndex\" of type \"Int!\" is required but not provided","locations":[{"line":1,"column":2}]}]} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/error_not_found.json b/offchain/graphql-server/tests/responses/error_not_found.json deleted file mode 100644 index 09bbabae2..000000000 --- a/offchain/graphql-server/tests/responses/error_not_found.json +++ /dev/null @@ -1 +0,0 @@ -{"data":null,"errors":[{"message":"notice not found","locations":[{"line":1,"column":2}],"path":["notice"]}]} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/error_unknown_field.json b/offchain/graphql-server/tests/responses/error_unknown_field.json deleted file mode 100644 index 1a7d4433e..000000000 --- a/offchain/graphql-server/tests/responses/error_unknown_field.json +++ /dev/null @@ -1 +0,0 @@ -{"errors":[{"message":"Unknown field \"test\" on type \"Notice\"","locations":[{"line":1,"column":40}]}]} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/input.json b/offchain/graphql-server/tests/responses/input.json deleted file mode 100644 index 82bea2976..000000000 --- a/offchain/graphql-server/tests/responses/input.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"index":0,"msgSender":"0x6d73672d73656e646572","timestamp":"1676489717","blockNumber":"0","payload":"0x696e7075742d30","status":"ACCEPTED"}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/input_with_notice.json 
b/offchain/graphql-server/tests/responses/input_with_notice.json deleted file mode 100644 index 8c71d0d7b..000000000 --- a/offchain/graphql-server/tests/responses/input_with_notice.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"index":0,"notice":{"index":0}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/input_with_notices.json b/offchain/graphql-server/tests/responses/input_with_notices.json deleted file mode 100644 index 45f656449..000000000 --- a/offchain/graphql-server/tests/responses/input_with_notices.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"index":0,"notices":{"totalCount":1,"edges":[{"node":{"index":0}}]}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/input_with_report.json b/offchain/graphql-server/tests/responses/input_with_report.json deleted file mode 100644 index aa033ec47..000000000 --- a/offchain/graphql-server/tests/responses/input_with_report.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"index":0,"report":{"index":0}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/input_with_reports.json b/offchain/graphql-server/tests/responses/input_with_reports.json deleted file mode 100644 index 7c14d26ed..000000000 --- a/offchain/graphql-server/tests/responses/input_with_reports.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"index":0,"reports":{"totalCount":1,"edges":[{"node":{"index":0}}]}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/input_with_voucher.json b/offchain/graphql-server/tests/responses/input_with_voucher.json deleted file mode 100644 index e3f185514..000000000 --- a/offchain/graphql-server/tests/responses/input_with_voucher.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"index":0,"voucher":{"index":0}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/input_with_vouchers.json b/offchain/graphql-server/tests/responses/input_with_vouchers.json deleted file mode 100644 index 99fb230db..000000000 --- a/offchain/graphql-server/tests/responses/input_with_vouchers.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"index":0,"vouchers":{"totalCount":1,"edges":[{"node":{"index":0}}]}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/inputs.json b/offchain/graphql-server/tests/responses/inputs.json deleted file mode 100644 index 69fc44104..000000000 --- a/offchain/graphql-server/tests/responses/inputs.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"inputs":{"totalCount":1,"edges":[{"node":{"index":0,"msgSender":"0x6d73672d73656e646572","timestamp":"1676489717","blockNumber":"0","payload":"0x696e7075742d30"}}]}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/next_page.json b/offchain/graphql-server/tests/responses/next_page.json deleted file mode 100644 index b1c31b446..000000000 --- a/offchain/graphql-server/tests/responses/next_page.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"notices":{"totalCount":5,"edges":[{"node":{"index":3,"payload":"0x6e6f746963652d302d33"}},{"node":{"index":4,"payload":"0x6e6f746963652d302d34"}}]}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/notice.json b/offchain/graphql-server/tests/responses/notice.json deleted file mode 100644 index a7e66e24e..000000000 --- a/offchain/graphql-server/tests/responses/notice.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"notice":{"index":0,"payload":"0x6e6f746963652d302d30"}}} \ No newline at end of file diff 
--git a/offchain/graphql-server/tests/responses/notice_with_input.json b/offchain/graphql-server/tests/responses/notice_with_input.json deleted file mode 100644 index a325acde9..000000000 --- a/offchain/graphql-server/tests/responses/notice_with_input.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"notice":{"index":0,"input":{"index":0}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/notice_with_proof.json b/offchain/graphql-server/tests/responses/notice_with_proof.json deleted file mode 100644 index a71d7e467..000000000 --- a/offchain/graphql-server/tests/responses/notice_with_proof.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"notice":{"index":0,"proof":{"context":"0x3c636f6e746578743e"}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/notices.json b/offchain/graphql-server/tests/responses/notices.json deleted file mode 100644 index 2ea97b605..000000000 --- a/offchain/graphql-server/tests/responses/notices.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"notices":{"totalCount":1,"edges":[{"node":{"index":0,"payload":"0x6e6f746963652d302d30"}}]}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/previous_page.json b/offchain/graphql-server/tests/responses/previous_page.json deleted file mode 100644 index 7e48c51f9..000000000 --- a/offchain/graphql-server/tests/responses/previous_page.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"notices":{"totalCount":5,"edges":[{"node":{"index":0,"payload":"0x6e6f746963652d302d30"}},{"node":{"index":1,"payload":"0x6e6f746963652d302d31"}}]}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/proof_from_notice.json b/offchain/graphql-server/tests/responses/proof_from_notice.json deleted file mode 100644 index 0179c1b49..000000000 --- a/offchain/graphql-server/tests/responses/proof_from_notice.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"notice":{"proof":{"validity":{"inputIndexWithinEpoch":0,"outputIndexWithinInput":0,"outputHashesRootHash":"0x3c6f74686572686173683e","vouchersEpochRootHash":"0x3c6f74686572686173683e","noticesEpochRootHash":"0x3c6f74686572686173683e","machineStateHash":"0x3c6f74686572686173683e","outputHashInOutputHashesSiblings":["0x3c6f7468657261727261793e"],"outputHashesInEpochSiblings":["0x3c6f7468657261727261793e"]}}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/proof_from_voucher.json b/offchain/graphql-server/tests/responses/proof_from_voucher.json deleted file mode 100644 index 5db703686..000000000 --- a/offchain/graphql-server/tests/responses/proof_from_voucher.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"voucher":{"proof":{"validity":{"inputIndexWithinEpoch":0,"outputIndexWithinInput":0,"outputHashesRootHash":"0x3c686173683e","vouchersEpochRootHash":"0x3c686173683e","noticesEpochRootHash":"0x3c686173683e","machineStateHash":"0x3c686173683e","outputHashInOutputHashesSiblings":["0x3c61727261793e"],"outputHashesInEpochSiblings":["0x3c61727261793e"]}}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/report.json b/offchain/graphql-server/tests/responses/report.json deleted file mode 100644 index 6dbfcee97..000000000 --- a/offchain/graphql-server/tests/responses/report.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"report":{"index":0,"payload":"0x7265706f72742d302d30"}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/report_with_input.json b/offchain/graphql-server/tests/responses/report_with_input.json deleted file 
mode 100644 index 52f9deefa..000000000 --- a/offchain/graphql-server/tests/responses/report_with_input.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"report":{"index":0,"input":{"index":0}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/reports.json b/offchain/graphql-server/tests/responses/reports.json deleted file mode 100644 index 21a84395f..000000000 --- a/offchain/graphql-server/tests/responses/reports.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"reports":{"totalCount":1,"edges":[{"node":{"index":0,"payload":"0x7265706f72742d302d30"}}]}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/variables.json b/offchain/graphql-server/tests/responses/variables.json deleted file mode 100644 index caf6f4e92..000000000 --- a/offchain/graphql-server/tests/responses/variables.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"input":{"blockNumber":"0"}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/voucher.json b/offchain/graphql-server/tests/responses/voucher.json deleted file mode 100644 index dc605b935..000000000 --- a/offchain/graphql-server/tests/responses/voucher.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"voucher":{"index":0,"destination":"0x64657374696e6174696f6e","payload":"0x766f75636865722d302d30"}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/voucher_with_input.json b/offchain/graphql-server/tests/responses/voucher_with_input.json deleted file mode 100644 index 3d6e36503..000000000 --- a/offchain/graphql-server/tests/responses/voucher_with_input.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"voucher":{"index":0,"input":{"index":0}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/voucher_with_proof.json b/offchain/graphql-server/tests/responses/voucher_with_proof.json deleted file mode 100644 index 70b74f348..000000000 --- a/offchain/graphql-server/tests/responses/voucher_with_proof.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"voucher":{"index":0,"proof":{"context":"0x3c636f6e746578743e"}}}} \ No newline at end of file diff --git a/offchain/graphql-server/tests/responses/vouchers.json b/offchain/graphql-server/tests/responses/vouchers.json deleted file mode 100644 index 6dc850c1f..000000000 --- a/offchain/graphql-server/tests/responses/vouchers.json +++ /dev/null @@ -1 +0,0 @@ -{"data":{"vouchers":{"totalCount":1,"edges":[{"node":{"index":0,"destination":"0x64657374696e6174696f6e","payload":"0x766f75636865722d302d30"}}]}}} \ No newline at end of file diff --git a/offchain/grpc-interfaces/Cargo.toml b/offchain/grpc-interfaces/Cargo.toml deleted file mode 100644 index 7032b4341..000000000 --- a/offchain/grpc-interfaces/Cargo.toml +++ /dev/null @@ -1,15 +0,0 @@ -[package] -name = "grpc-interfaces" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -tonic.workspace = true -prost.workspace = true - -[build-dependencies] -tonic-build.workspace = true - -[package.metadata.cargo-machete] -ignored = ["prost"] diff --git a/offchain/grpc-interfaces/build.rs b/offchain/grpc-interfaces/build.rs deleted file mode 100644 index 9fd565c5b..000000000 --- a/offchain/grpc-interfaces/build.rs +++ /dev/null @@ -1,18 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -fn main() -> Result<(), Box<dyn std::error::Error>> { - tonic_build::configure() - .protoc_arg("--experimental_allow_proto3_optional") - .compile( - &[ - "./grpc-interfaces/versioning.proto", - 
"./grpc-interfaces/server-manager.proto", - ], - &["./grpc-interfaces"], - )?; - println!("cargo:rerun-if-changed=./grpc-interfaces/versioning.proto"); - println!("cargo:rerun-if-changed=./grpc-interfaces/server-manager.proto"); - println!("cargo:rerun-if-changed=build.rs"); - Ok(()) -} diff --git a/offchain/grpc-interfaces/grpc-interfaces b/offchain/grpc-interfaces/grpc-interfaces deleted file mode 160000 index cba7c3f1c..000000000 --- a/offchain/grpc-interfaces/grpc-interfaces +++ /dev/null @@ -1 +0,0 @@ -Subproject commit cba7c3f1c53b83bee83d8a1e5a1074591279220f diff --git a/offchain/grpc-interfaces/src/lib.rs b/offchain/grpc-interfaces/src/lib.rs deleted file mode 100644 index 14d404f54..000000000 --- a/offchain/grpc-interfaces/src/lib.rs +++ /dev/null @@ -1,14 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub mod versioning { - tonic::include_proto!("versioning"); -} - -pub mod cartesi_machine { - tonic::include_proto!("cartesi_machine"); -} - -pub mod cartesi_server_manager { - tonic::include_proto!("cartesi_server_manager"); -} diff --git a/offchain/host-runner/Cargo.toml b/offchain/host-runner/Cargo.toml deleted file mode 100644 index 51eaab757..000000000 --- a/offchain/host-runner/Cargo.toml +++ /dev/null @@ -1,38 +0,0 @@ -[package] -name = "host-runner" -edition.workspace = true -license.workspace = true -version.workspace = true - -[[bin]] -name = "cartesi-rollups-host-runner" -path = "src/main.rs" - -[dependencies] -grpc-interfaces = { path = "../grpc-interfaces" } -http-health-check = { path = "../http-health-check" } -log = { path = "../log" } - -actix-web.workspace = true -async-trait.workspace = true -byteorder.workspace = true -clap = { workspace = true, features = ["derive", "env"] } -ethabi.workspace = true -futures-util.workspace = true -hex.workspace = true -reqwest = { workspace = true, features = ["json"] } -serde = { workspace = true, features = ["derive"] } -sha3 = { workspace = true, features = ["std"] } -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } -tonic.workspace = true -tonic-health.workspace = true -tracing.workspace = true - -[dev-dependencies] -rollups-http-client = { path = "../rollups-http-client" } - -mockall.workspace = true -rand.workspace = true -serial_test.workspace = true -tracing-test.workspace = true diff --git a/offchain/host-runner/README.md b/offchain/host-runner/README.md deleted file mode 100644 index c82367d67..000000000 --- a/offchain/host-runner/README.md +++ /dev/null @@ -1,13 +0,0 @@ -# Host Runner - -This project implements the gRPC server-manager API. -Different from the server-manager, the host-runner does not instantiate a Cartesi machine. -Instead, it receives HTTP requests directly from a DApp running in the host machine. 
- -## Tests - -In addition to the usual [test procedure](../README.md#tests), you can enable verbose logging for the integration tests by setting the following environment variable: - -```shell -export CARTESI_TEST_VERBOSE=1 -``` diff --git a/offchain/host-runner/src/config.rs b/offchain/host-runner/src/config.rs deleted file mode 100644 index 9d7873b90..000000000 --- a/offchain/host-runner/src/config.rs +++ /dev/null @@ -1,77 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; -use log::{LogConfig, LogEnvCliConfig}; - -const DEFAULT_ADDRESS: &str = "0.0.0.0"; - -#[derive(Debug, Clone)] -pub struct Config { - pub log_config: LogConfig, - pub grpc_server_manager_address: String, - pub grpc_server_manager_port: u16, - pub http_inspect_address: String, - pub http_inspect_port: u16, - pub http_rollup_server_address: String, - pub http_rollup_server_port: u16, - pub finish_timeout: u64, - pub healthcheck_port: u16, -} - -#[derive(Parser)] -#[command(name = "host_runner_config")] -#[command(about = "Configuration for host-runner")] -pub struct CLIConfig { - /// Logs Config - #[command(flatten)] - pub log_config: LogEnvCliConfig, - - /// gRPC address of the Server Manager endpoint - #[arg(long, env, default_value = DEFAULT_ADDRESS)] - pub grpc_server_manager_address: String, - - /// gRPC port of the Server Manager endpoint - #[arg(long, env, default_value = "5001")] - pub grpc_server_manager_port: u16, - - /// HTTP address of the Inspect endpoint - #[arg(long, env, default_value = DEFAULT_ADDRESS)] - pub http_inspect_address: String, - - /// HTTP port of the Inspect endpoint - #[arg(long, env, default_value = "5002")] - pub http_inspect_port: u16, - - /// HTTP address of the Rollup Server endpoint - #[arg(long, env, default_value = DEFAULT_ADDRESS)] - pub http_rollup_server_address: String, - - /// HTTP port of the Rollup Server endpoint - #[arg(long, env, default_value = "5004")] - pub http_rollup_server_port: u16, - - /// Duration in ms for the finish request to time out - #[arg(long, env, default_value = "10000")] - pub finish_timeout: u64, - - /// Port of the health check endpoint - #[arg(long, env = "HOST_RUNNER_HEALTHCHECK_PORT", default_value_t = 8080)] - pub healthcheck_port: u16, -} - -impl From<CLIConfig> for Config { - fn from(cli_config: CLIConfig) -> Self { - Self { - log_config: cli_config.log_config.into(), - grpc_server_manager_address: cli_config.grpc_server_manager_address, - grpc_server_manager_port: cli_config.grpc_server_manager_port, - http_inspect_address: cli_config.http_inspect_address, - http_inspect_port: cli_config.http_inspect_port, - http_rollup_server_address: cli_config.http_rollup_server_address, - http_rollup_server_port: cli_config.http_rollup_server_port, - finish_timeout: cli_config.finish_timeout, - healthcheck_port: cli_config.healthcheck_port, - } - } -} diff --git a/offchain/host-runner/src/controller.rs b/offchain/host-runner/src/controller.rs deleted file mode 100644 index 51d50e5e4..000000000 --- a/offchain/host-runner/src/controller.rs +++ /dev/null @@ -1,941 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use async_trait::async_trait; -use snafu::Snafu; -use std::time::Duration; -use tokio::sync::{mpsc, oneshot}; - -use crate::model::*; - -const MPSC_BUFFER_SIZE: usize = 1000; - -/// State machine that controls the rollup state. 
-#[derive(Clone, Debug)] -pub struct Controller { - advance_tx: mpsc::Sender<SyncAdvanceStateRequest>, - inspect_tx: mpsc::Sender<SyncInspectResult>, - finish_tx: mpsc::Sender<SyncFinishRequest>, - voucher_tx: mpsc::Sender<SyncVoucherRequest>, - notice_tx: mpsc::Sender<SyncNoticeRequest>, - report_tx: mpsc::Sender<SyncReportRequest>, - exception_tx: mpsc::Sender<SyncExceptionRequest>, - shutdown_tx: mpsc::Sender<SyncShutdownRequest>, -} - -impl Controller { - pub fn new(finish_timeout: Duration) -> Self { - let (advance_tx, advance_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let (inspect_tx, inspect_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let (voucher_tx, voucher_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let (notice_tx, notice_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let (report_tx, report_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let (finish_tx, finish_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let (exception_tx, exception_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let (shutdown_tx, shutdown_rx) = mpsc::channel(MPSC_BUFFER_SIZE); - let data = SharedStateData { - advance_rx, - inspect_rx, - voucher_rx, - notice_rx, - report_rx, - finish_rx, - exception_rx, - shutdown_rx, - finish_timeout, - }; - let service = Service::new(data); - tokio::spawn(service.run()); - Self { - advance_tx, - inspect_tx, - voucher_tx, - notice_tx, - report_tx, - finish_tx, - exception_tx, - shutdown_tx, - } - } - - pub async fn advance( - &self, - request: AdvanceStateRequest, - ) -> oneshot::Receiver<AdvanceResult> { - SyncRequest::send(&self.advance_tx, request).await - } - - pub async fn inspect( - &self, - request: InspectStateRequest, - ) -> oneshot::Receiver<InspectResult> { - SyncRequest::send(&self.inspect_tx, request).await - } - - pub async fn finish( - &self, - status: FinishStatus, - ) -> oneshot::Receiver<Result<RollupRequest, ControllerError>> { - SyncRequest::send(&self.finish_tx, status).await - } - - pub async fn insert_voucher( - &self, - voucher: Voucher, - ) -> oneshot::Receiver<Result<usize, ControllerError>> { - SyncRequest::send(&self.voucher_tx, voucher).await - } - - pub async fn insert_notice( - &self, - notice: Notice, - ) -> oneshot::Receiver<Result<usize, ControllerError>> { - SyncRequest::send(&self.notice_tx, notice).await - } - - pub async fn insert_report( - &self, - report: Report, - ) -> oneshot::Receiver<Result<(), ControllerError>> { - SyncRequest::send(&self.report_tx, report).await - } - - pub async fn notify_exception( - &self, - exception: RollupException, - ) -> oneshot::Receiver<Result<(), ControllerError>> { - SyncRequest::send(&self.exception_tx, exception).await - } - - pub async fn shutdown(&self) -> oneshot::Receiver<()> { - SyncRequest::send(&self.shutdown_tx, ()).await - } -} - -#[derive(Debug, PartialEq, Snafu)] -pub enum ControllerError { - #[snafu(display("no rollup request available"))] - FetchRequestTimeout, - #[snafu(display( - "invalid request {} in {} state", - request_name, - state_name - ))] - InvalidRequest { - request_name: String, - state_name: String, - }, -} - -struct Service { - state: Box<dyn State>, -} - -impl Service { - fn new(data: SharedStateData) -> Self { - Self { - state: Box::new(IdleState::new(data)), - } - } - - async fn run(mut self) { - loop { - if let Some(state) = self.state.process().await { - self.state = state; - } else { - tracing::info!("controller service terminated successfully"); - break; - } - } - } - - fn handle_invalid<T, U>( - request: SyncRequest<T, Result<U, ControllerError>>, - state: Box<dyn State>, - request_name: &str, - ) -> Option<Box<dyn State>> - where - T: std::fmt::Debug + Send + Sync, - U: std::fmt::Debug + Send + Sync, - { - let err = ControllerError::InvalidRequest { - state_name: state.name(), - request_name: request_name.into(), - }; - tracing::warn!("{}", err.to_string()); - let (_, response_tx) = request.into_inner(); - send_response(response_tx, Err(err)); - Some(state) - } - - fn shutdown(request: SyncShutdownRequest) -> 
Option<Box<dyn State>> { - tracing::info!("processing shutdown request"); - request.process(|_| ()); - None - } -} - -struct SyncRequest<T, U> -where - T: Send + Sync, - U: Send + Sync, -{ - request: T, - response_tx: oneshot::Sender<U>, -} - -impl<T, U> SyncRequest<T, U> -where - T: std::fmt::Debug + Send + Sync, - U: std::fmt::Debug + Send + Sync, -{ - async fn send(tx: &mpsc::Sender<SyncRequest<T, U>>, request: T) -> oneshot::Receiver<U> { - let (response_tx, response_rx) = oneshot::channel(); - if let Err(e) = tx - .send(SyncRequest { - request, - response_tx, - }) - .await - { - tracing::error!("failed to send request ({})", e) - } - response_rx - } - - fn into_inner(self) -> (T, oneshot::Sender<U>) { - (self.request, self.response_tx) - } - - fn process<F>(self, f: F) - where - F: FnOnce(T) -> U, - { - let response = f(self.request); - send_response(self.response_tx, response); - } -} - -fn send_response<U>(tx: oneshot::Sender<U>, response: U) -where - U: std::fmt::Debug + Send + Sync, -{ - if tx.send(response).is_err() { - tracing::warn!("failed to send response (channel dropped)"); - } -} - -impl<T, U> std::fmt::Debug for SyncRequest<T, U> -where - T: std::fmt::Debug + Send + Sync, - U: std::fmt::Debug + Send + Sync, -{ - fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { - write!(f, "{:?}", self.request) - } -} - -type SyncAdvanceStateRequest = SyncRequest<AdvanceStateRequest, AdvanceResult>; -type SyncInspectResult = SyncRequest<InspectStateRequest, InspectResult>; -type SyncFinishRequest = - SyncRequest<FinishStatus, Result<RollupRequest, ControllerError>>; -type SyncVoucherRequest = SyncRequest<Voucher, Result<usize, ControllerError>>; -type SyncNoticeRequest = SyncRequest<Notice, Result<usize, ControllerError>>; -type SyncReportRequest = SyncRequest<Report, Result<(), ControllerError>>; -type SyncExceptionRequest = - SyncRequest<RollupException, Result<(), ControllerError>>; -type SyncShutdownRequest = SyncRequest<(), ()>; - -struct SharedStateData { - advance_rx: mpsc::Receiver<SyncAdvanceStateRequest>, - inspect_rx: mpsc::Receiver<SyncInspectResult>, - finish_rx: mpsc::Receiver<SyncFinishRequest>, - voucher_rx: mpsc::Receiver<SyncVoucherRequest>, - notice_rx: mpsc::Receiver<SyncNoticeRequest>, - report_rx: mpsc::Receiver<SyncReportRequest>, - exception_rx: mpsc::Receiver<SyncExceptionRequest>, - shutdown_rx: mpsc::Receiver<SyncShutdownRequest>, - finish_timeout: Duration, -} - -/// OOP state design pattern -#[async_trait] -trait State: Send + Sync { - async fn process(self: Box<Self>) -> Option<Box<dyn State>>; - fn name(&self) -> String; -} - -/// The controller waits for a finish request from the DApp -struct IdleState { - data: SharedStateData, -} - -impl IdleState { - fn new(data: SharedStateData) -> Self { - Self { data } - } -} - -#[async_trait] -impl State for IdleState { - async fn process(mut self: Box<Self>) -> Option<Box<dyn State>> { - tokio::select!
{ - biased; - Some(request) = self.data.finish_rx.recv() => { - tracing::debug!("received finish request; changing state to fetch request"); - tracing::debug!("request: {:?}", request); - let (_, response_tx) = request.into_inner(); - Some(Box::new(FetchRequestState::new(self.data, response_tx))) - } - Some(request) = self.data.voucher_rx.recv() => { - Service::handle_invalid(request, self, "voucher") - } - Some(request) = self.data.notice_rx.recv() => { - Service::handle_invalid(request, self, "notice") - } - Some(request) = self.data.report_rx.recv() => { - Service::handle_invalid(request, self, "report") - } - Some(request) = self.data.exception_rx.recv() => { - Service::handle_invalid(request, self, "exception") - } - Some(request) = self.data.shutdown_rx.recv() => { - Service::shutdown(request) - } - } - } - - fn name(&self) -> String { - "idle".into() - } -} - -/// The controller waits for either an inspect or an advance request from the gRPC service -struct FetchRequestState { - data: SharedStateData, - finish_response_tx: oneshot::Sender<Result<RollupRequest, ControllerError>>, -} - -impl FetchRequestState { - fn new( - data: SharedStateData, - finish_response_tx: oneshot::Sender< - Result<RollupRequest, ControllerError>, - >, - ) -> Self { - Self { - data, - finish_response_tx, - } - } -} - -#[async_trait] -impl State for FetchRequestState { - async fn process(mut self: Box<Self>) -> Option<Box<dyn State>> { - tokio::select! { - biased; - _ = tokio::time::sleep(self.data.finish_timeout) => { - tracing::debug!("fetch request timed out; setting state to idle"); - let timeout_err = ControllerError::FetchRequestTimeout; - send_response(self.finish_response_tx, Err(timeout_err)); - Some(Box::new(IdleState::new(self.data))) - } - Some(request) = self.data.inspect_rx.recv() => { - tracing::debug!("received inspect request; setting state to inspect"); - tracing::debug!("request: {:?}", request); - let (inspect_request, inspect_response_tx) = request.into_inner(); - let rollup_request = RollupRequest::InspectState(inspect_request); - send_response(self.finish_response_tx, Ok(rollup_request)); - Some(Box::new(InspectState::new(self.data, inspect_response_tx))) - } - Some(request) = self.data.advance_rx.recv() => { - tracing::debug!("received advance request; setting state to advance"); - tracing::debug!("request: {:?}", request); - let (advance_request, advance_response_tx) = request.into_inner(); - let rollup_request = RollupRequest::AdvanceState(advance_request); - send_response(self.finish_response_tx, Ok(rollup_request)); - Some(Box::new(AdvanceState::new(self.data, advance_response_tx))) - } - Some(request) = self.data.finish_rx.recv() => { - tracing::debug!("received finish request; terminating previous finish request"); - tracing::debug!("request: {:?}", request); - let timeout_err = ControllerError::FetchRequestTimeout; - send_response(self.finish_response_tx, Err(timeout_err)); - let (_, response_tx) = request.into_inner(); - Some(Box::new(FetchRequestState::new(self.data, response_tx))) - } - Some(request) = self.data.voucher_rx.recv() => { - Service::handle_invalid(request, self, "voucher") - } - Some(request) = self.data.notice_rx.recv() => { - Service::handle_invalid(request, self, "notice") - } - Some(request) = self.data.report_rx.recv() => { - Service::handle_invalid(request, self, "report") - } - Some(request) = self.data.exception_rx.recv() => { - Service::handle_invalid(request, self, "exception") - } - Some(request) = self.data.shutdown_rx.recv() => { - Service::shutdown(request) - } - } - } - - fn name(&self) -> String { - "fetch request".into() - } -} - -/// 
The controller waits for reports, exceptions, and finish requests -struct InspectState { - data: SharedStateData, - inspect_response_tx: oneshot::Sender<InspectResult>, - reports: Vec<Report>, -} - -impl InspectState { - fn new( - data: SharedStateData, - inspect_response_tx: oneshot::Sender<InspectResult>, - ) -> Self { - Self { - data, - inspect_response_tx, - reports: vec![], - } - } -} - -#[async_trait] -impl State for InspectState { - async fn process(mut self: Box<Self>) -> Option<Box<dyn State>> { - tokio::select! { - biased; - Some(request) = self.data.finish_rx.recv() => { - tracing::debug!("received finish request; changing state to fetch request"); - tracing::debug!("request: {:?}", request); - let (status, response_tx) = request.into_inner(); - let result = match status { - FinishStatus::Accept => InspectResult::accepted(self.reports), - FinishStatus::Reject => InspectResult::rejected(self.reports), - }; - send_response(self.inspect_response_tx, result); - Some(Box::new(FetchRequestState::new(self.data, response_tx))) - } - Some(request) = self.data.report_rx.recv() => { - tracing::debug!("received report request"); - tracing::debug!("request: {:?}", request); - request.process(|report| { - self.reports.push(report); - Ok(()) - }); - Some(self) - } - Some(request) = self.data.exception_rx.recv() => { - tracing::debug!("received exception request; setting state to idle"); - tracing::debug!("request: {:?}", request); - let (exception, exception_response_tx) = request.into_inner(); - let result = InspectResult::exception(self.reports, exception); - send_response(self.inspect_response_tx, result); - send_response(exception_response_tx, Ok(())); - Some(Box::new(IdleState::new(self.data))) - } - Some(request) = self.data.voucher_rx.recv() => { - Service::handle_invalid(request, self, "voucher") - } - Some(request) = self.data.notice_rx.recv() => { - Service::handle_invalid(request, self, "notice") - } - Some(request) = self.data.shutdown_rx.recv() => { - Service::shutdown(request) - } - } - } - - fn name(&self) -> String { - "inspect".into() - } -} - -/// The controller waits for vouchers, notices, reports, exceptions, and finish requests -struct AdvanceState { - data: SharedStateData, - advance_response_tx: oneshot::Sender<AdvanceResult>, - vouchers: Vec<Voucher>, - notices: Vec<Notice>, - reports: Vec<Report>, -} - -impl AdvanceState { - fn new( - data: SharedStateData, - advance_response_tx: oneshot::Sender<AdvanceResult>, - ) -> Self { - Self { - data, - advance_response_tx, - vouchers: vec![], - notices: vec![], - reports: vec![], - } - } -} - -#[async_trait] -impl State for AdvanceState { - async fn process(mut self: Box<Self>) -> Option<Box<dyn State>> { - tokio::select!
{ - biased; - Some(request) = self.data.finish_rx.recv() => { - tracing::debug!("received finish request; changing state to fetch request"); - tracing::debug!("request: {:?}", request); - let (status, response_tx) = request.into_inner(); - let result = match status { - FinishStatus::Accept => { - AdvanceResult::accepted( - self.vouchers, - self.notices, - self.reports, - ) - }, - FinishStatus::Reject => { - AdvanceResult::rejected( - self.reports, - ) - }, - }; - send_response(self.advance_response_tx, result); - Some(Box::new(FetchRequestState::new(self.data, response_tx))) - } - Some(request) = self.data.voucher_rx.recv() => { - tracing::debug!("received voucher request"); - tracing::debug!("request: {:?}", request); - request.process(|voucher| { - self.vouchers.push(voucher); - Ok(self.vouchers.len() - 1) - }); - Some(self) - } - Some(request) = self.data.notice_rx.recv() => { - tracing::debug!("received notice request"); - tracing::debug!("request: {:?}", request); - request.process(|notice| { - self.notices.push(notice); - Ok(self.notices.len() - 1) - }); - Some(self) - } - Some(request) = self.data.report_rx.recv() => { - tracing::debug!("received report request"); - tracing::debug!("request: {:?}", request); - request.process(|report| { - self.reports.push(report); - Ok(()) - }); - Some(self) - } - Some(request) = self.data.exception_rx.recv() => { - tracing::debug!("received exception request; setting state to idle"); - tracing::debug!("request: {:?}", request); - let (exception, exception_response_tx) = request.into_inner(); - let result = AdvanceResult::exception( - exception, - self.reports, - ); - send_response(self.advance_response_tx, result); - send_response(exception_response_tx, Ok(())); - Some(Box::new(IdleState::new(self.data))) - } - Some(request) = self.data.shutdown_rx.recv() => { - Service::shutdown(request) - } - } - } - - fn name(&self) -> String { - "advance".into() - } -} - -#[cfg(test)] -mod tests { - use super::*; - - const TEST_FINISH_TIMEOUT: Duration = Duration::from_millis(100); - - fn setup() -> Controller { - Controller::new(TEST_FINISH_TIMEOUT) - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_rejects_invalid_requests_in_idle_state() { - let controller = setup(); - let rx = controller.insert_voucher(mock_voucher()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("voucher"), - state_name: String::from("idle") - } - ); - let rx = controller.insert_notice(mock_notice()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("notice"), - state_name: String::from("idle") - } - ); - let rx = controller.insert_report(mock_report()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("report"), - state_name: String::from("idle") - } - ); - let rx = controller.notify_exception(mock_exception()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("exception"), - state_name: String::from("idle") - } - ); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_handles_multiple_finish_requests_at_the_same_time() { - let controller = setup(); - let mut handlers = vec![]; - const N: usize = 3; - for _ in 0..N { - let handler = { - let controller = controller.clone(); - tokio::spawn(async move { - 
controller.finish(FinishStatus::Accept).await - }) - }; - handlers.push(handler); - } - for handler in handlers { - let rx = handler.await.unwrap(); - let timeout_err = rx.await.unwrap().unwrap_err(); - assert_eq!(timeout_err, ControllerError::FetchRequestTimeout); - } - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_rejects_invalid_requests_in_fetch_request_state() { - let controller = setup(); - // Set state to fetch request by calling finish once in another thread - let _ = controller.finish(FinishStatus::Accept).await; - let rx = controller.insert_voucher(mock_voucher()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("voucher"), - state_name: String::from("fetch request") - } - ); - let rx = controller.insert_notice(mock_notice()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("notice"), - state_name: String::from("fetch request") - } - ); - let rx = controller.insert_report(mock_report()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("report"), - state_name: String::from("fetch request") - } - ); - let rx = controller.notify_exception(mock_exception()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("exception"), - state_name: String::from("fetch request") - } - ); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_rejects_invalid_requests_during_inspect() { - let controller = setup(); - let _ = controller.inspect(mock_inspect_request()).await; - let _ = controller - .finish(FinishStatus::Accept) - .await - .await - .unwrap() - .unwrap(); - let rx = controller.insert_voucher(mock_voucher()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("voucher"), - state_name: String::from("inspect") - } - ); - let rx = controller.insert_notice(mock_notice()).await; - assert_eq!( - rx.await.unwrap().unwrap_err(), - ControllerError::InvalidRequest { - request_name: String::from("notice"), - state_name: String::from("inspect") - } - ); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_advances_state_before_finish() { - let controller = setup(); - // Send advance request - let advance_request = mock_advance_request(); - let advance_rx = controller.advance(advance_request.clone()).await; - // Send first finish request - let finish_rx = controller.finish(FinishStatus::Accept).await; - let rollup_request = finish_rx.await.unwrap().unwrap(); - assert_eq!( - rollup_request, - RollupRequest::AdvanceState(advance_request) - ); - // Send second finish request - let _ = controller.finish(FinishStatus::Accept).await; - // Obtain result from advance request - let advance_result = advance_rx.await.unwrap(); - assert_eq!( - advance_result, - AdvanceResult::accepted(vec![], vec![], vec![]) - ); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_advances_state_after_finish() { - let controller = setup(); - // Send first finish request - let finish_rx = controller.finish(FinishStatus::Accept).await; - // Send advance request - let advance_request = mock_advance_request(); - let advance_rx = controller.advance(advance_request.clone()).await; - // Receive first finish result - let 
rollup_request = finish_rx.await.unwrap().unwrap(); - assert_eq!( - rollup_request, - RollupRequest::AdvanceState(advance_request) - ); - // Send second finish request - let _ = controller.finish(FinishStatus::Accept).await; - // Obtain result from advance request - let advance_result = advance_rx.await.unwrap(); - assert_eq!( - advance_result, - AdvanceResult::accepted(vec![], vec![], vec![]) - ); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_advances_state_after_previous_advance() { - let controller = setup(); - const N: usize = 3; - let mut advance_requests = std::collections::VecDeque::new(); - let mut advance_rxs = std::collections::VecDeque::new(); - // Send several advance requests before starting - for _ in 0..N { - let request = mock_advance_request(); - advance_requests.push_back(request.clone()); - let rx = controller.advance(request).await; - advance_rxs.push_back(rx); - } - // Send first finish - let mut finish_rx = controller.finish(FinishStatus::Accept).await; - // Process each advance request - while !advance_requests.is_empty() { - let rollup_request = finish_rx.await.unwrap().unwrap(); - let expected_request = advance_requests.pop_front().unwrap(); - assert_eq!( - rollup_request, - RollupRequest::AdvanceState(expected_request) - ); - finish_rx = controller.finish(FinishStatus::Accept).await; - let _ = advance_rxs.pop_front().unwrap().await.unwrap(); - } - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_prioritizes_inspect_over_advance_requests() { - let controller = setup(); - // Before first finish, send first an advance request and an inspect request - let _ = controller.advance(mock_advance_request()).await; - let _ = controller.inspect(mock_inspect_request()).await; - // The received request should be the inspect state - let finish_rx = controller.finish(FinishStatus::Accept).await; - let rollup_request = finish_rx.await.unwrap().unwrap(); - assert!(matches!(rollup_request, RollupRequest::InspectState(_))); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_times_out_during_fetch_request() { - let controller = setup(); - // Send first finish request without sending a rollup request - let finish_rx = controller.finish(FinishStatus::Accept).await; - let timeout_err = finish_rx.await.unwrap().unwrap_err(); - assert_eq!(timeout_err, ControllerError::FetchRequestTimeout); - // Send an advance request that should not timeout - let advance_request = mock_advance_request(); - let _ = controller.advance(advance_request.clone()).await; - let finish_rx = controller.finish(FinishStatus::Accept).await; - let rollup_request = finish_rx.await.unwrap().unwrap(); - assert_eq!( - rollup_request, - RollupRequest::AdvanceState(advance_request) - ); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_sends_vouchers_notices_and_reports_during_advance() { - let controller = setup(); - // Set state to advance - let advance_rx = controller.advance(mock_advance_request()).await; - let finish_rx = controller.finish(FinishStatus::Accept).await; - let _ = finish_rx.await.unwrap().unwrap(); - // Insert voucher - let voucher = mock_voucher(); - let voucher_rx = controller.insert_voucher(voucher.clone()).await; - let voucher_id = voucher_rx.await.unwrap().unwrap(); - assert_eq!(voucher_id, 0); - // Insert notice - let notice = mock_notice(); - let notice_rx = 
controller.insert_notice(notice.clone()).await; - let notice_id = notice_rx.await.unwrap().unwrap(); - assert_eq!(notice_id, 0); - // Insert report - let report = mock_report(); - let report_rx = controller.insert_report(report.clone()).await; - report_rx.await.unwrap().unwrap(); - // Finalize the current advance state - let _ = controller.finish(FinishStatus::Reject).await; - // Obtain the advance result - let result = advance_rx.await.unwrap(); - assert_eq!(result, AdvanceResult::rejected(vec![report])); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_sends_reports_during_inspect() { - let controller = setup(); - // Set state to inspect - let inspect_rx = controller.inspect(mock_inspect_request()).await; - let finish_rx = controller.finish(FinishStatus::Accept).await; - let _ = finish_rx.await.unwrap().unwrap(); - // Insert report - let report = mock_report(); - let report_rx = controller.insert_report(report.clone()).await; - report_rx.await.unwrap().unwrap(); - // Finalize the current inspect state - let _ = controller.finish(FinishStatus::Accept).await; - // Obtain the inspect result - let result = inspect_rx.await.unwrap(); - assert_eq!(result, InspectResult::accepted(vec![report])); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_handles_exception_during_advance() { - let controller = setup(); - // Set state to advance - let advance_rx = controller.advance(mock_advance_request()).await; - let finish_rx = controller.finish(FinishStatus::Accept).await; - let _ = finish_rx.await.unwrap().unwrap(); - // Send rollup exception - let exception = mock_exception(); - let exception_rx = controller.notify_exception(exception.clone()).await; - exception_rx.await.unwrap().unwrap(); - let advance_result = advance_rx.await.unwrap(); - assert_eq!(advance_result, AdvanceResult::exception(exception, vec![])); - controller.shutdown().await; - } - - #[tokio::test] - #[tracing_test::traced_test] - async fn test_it_handles_exception_during_inspect() { - let controller = setup(); - // Set state to inspect - let inspect_rx = controller.inspect(mock_inspect_request()).await; - let finish_rx = controller.finish(FinishStatus::Accept).await; - let _ = finish_rx.await.unwrap().unwrap(); - // Send rollup exception - let exception = mock_exception(); - let exception_rx = controller.notify_exception(exception.clone()).await; - exception_rx.await.unwrap().unwrap(); - let result = inspect_rx.await.unwrap(); - assert_eq!(result, InspectResult::exception(vec![], exception)); - controller.shutdown().await; - } - - fn mock_voucher() -> Voucher { - Voucher::new(rand::random(), rand::random::<[u8; 32]>().into()) - } - - fn mock_notice() -> Notice { - Notice::new(rand::random::<[u8; 32]>().into()) - } - - fn mock_report() -> Report { - Report { - payload: rand::random::<[u8; 32]>().into(), - } - } - - fn mock_exception() -> RollupException { - RollupException { - payload: rand::random::<[u8; 32]>().into(), - } - } - - fn mock_advance_request() -> AdvanceStateRequest { - AdvanceStateRequest { - metadata: AdvanceMetadata { - msg_sender: rand::random(), - epoch_index: rand::random(), - input_index: rand::random(), - block_number: rand::random(), - timestamp: rand::random(), - }, - payload: rand::random::<[u8; 32]>().into(), - } - } - - fn mock_inspect_request() -> InspectStateRequest { - InspectStateRequest { - payload: rand::random::<[u8; 32]>().into(), - } - } -} diff --git 
a/offchain/host-runner/src/conversions.rs b/offchain/host-runner/src/conversions.rs deleted file mode 100644 index d90df1953..000000000 --- a/offchain/host-runner/src/conversions.rs +++ /dev/null @@ -1,88 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use hex::FromHexError; -use snafu::Snafu; - -#[derive(Debug, Snafu)] -pub enum DecodeError { - #[snafu(display( - "Failed to decode ethereum binary string {} (expected 0x prefix)", - s - ))] - InvalidPrefix { s: String }, - #[snafu(display("Failed to decode ethereum binary string {} ({})", s, e))] - FromHex { s: String, e: FromHexError }, -} - -/// Convert binary array to Ethereum binary format -pub fn encode_ethereum_binary(bytes: &[u8]) -> String { - String::from("0x") + &hex::encode(bytes) -} - -/// Convert string in Ethereum binary format to binary array -pub fn decode_ethereum_binary(s: &str) -> Result<Vec<u8>, DecodeError> { - snafu::ensure!(s.starts_with("0x"), InvalidPrefixSnafu { s }); - hex::decode(&s[2..]).map_err(|e| DecodeError::FromHex { - s: s.to_string(), - e, - }) -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_encode() { - assert_eq!( - encode_ethereum_binary(&[0x01, 0x20, 0xFF, 0x00]).as_str(), - "0x0120ff00" - ); - } - - #[test] - fn test_encode_with_empty() { - assert_eq!(encode_ethereum_binary(&[]).as_str(), "0x"); - } - - #[test] - fn test_decode_with_uppercase() { - assert_eq!( - decode_ethereum_binary("0x0120FF00").unwrap(), - vec![0x01, 0x20, 0xFF, 0x00] - ); - } - - #[test] - fn test_decode_with_lowercase() { - assert_eq!(decode_ethereum_binary("0xff").unwrap(), vec![0xFF]); - } - - #[test] - fn test_decode_with_invalid_prefix() { - let err = decode_ethereum_binary("0X0120FF00").unwrap_err(); - assert_eq!( - err.to_string().as_str(), - "Failed to decode ethereum binary string 0X0120FF00 (expected 0x prefix)", - ); - } - - #[test] - fn test_decode_with_invalid_number() { - let err = decode_ethereum_binary("0xZZ").unwrap_err(); - assert_eq!( - err.to_string().as_str(), - "Failed to decode ethereum binary string 0xZZ (Invalid character 'Z' at position 0)" - ); - } - - #[test] - fn test_decode_with_odd_number_of_chars() { - let err = decode_ethereum_binary("0xA").unwrap_err(); - assert_eq!( - err.to_string().as_str(), - "Failed to decode ethereum binary string 0xA (Odd number of digits)" - ); - } -}
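The module above is essentially a 0x-prefix convention on top of the hex crate; a minimal sketch of the same round-trip without the snafu error plumbing (function names here are illustrative, not from the codebase):

    fn encode(bytes: &[u8]) -> String {
        format!("0x{}", hex::encode(bytes))
    }

    fn decode(s: &str) -> Option<Vec<u8>> {
        // Unlike the deleted code, which distinguishes a missing "0x" prefix
        // from invalid hex digits, this sketch collapses both failures into None.
        hex::decode(s.strip_prefix("0x")?).ok()
    }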
diff --git a/offchain/host-runner/src/driver.rs b/offchain/host-runner/src/driver.rs deleted file mode 100644 index f8c67a909..000000000 --- a/offchain/host-runner/src/driver.rs +++ /dev/null @@ -1,74 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use byteorder::{BigEndian, WriteBytesExt}; -use std::mem::size_of; - -use crate::hash::{Digest, Hash, Hasher, HASH_SIZE}; - -pub fn compute_voucher_hash(destination: &[u8], payload: &[u8]) -> Hash { - let mut hasher = Hasher::new(); - write_data(&mut hasher, destination); - write_u64(&mut hasher, 0x40); - write_payload(&mut hasher, payload); - hasher.finalize().into() -} - -pub fn compute_notice_hash(payload: &[u8]) -> Hash { - let mut hasher = Hasher::new(); - write_u64(&mut hasher, 0x20); - write_payload(&mut hasher, payload); - hasher.finalize().into() -} - -fn write_padding(hasher: &mut Hasher, n: usize) { - let alignment = n % HASH_SIZE; - if alignment != 0 { - for _ in alignment..HASH_SIZE { - hasher.write_u8(0).expect("cannot fail"); - } - } -} - -fn write_u64(hasher: &mut Hasher, value: u64) { - write_padding(hasher, size_of::<u64>()); - hasher.write_u64::<BigEndian>(value).expect("cannot fail"); -} - -fn write_data(hasher: &mut Hasher, data: &[u8]) { - write_padding(hasher, data.len()); - hasher.update(data); -} - -fn write_payload(hasher: &mut Hasher, payload: &[u8]) { - write_u64(hasher, payload.len() as u64); - hasher.update(payload); - write_padding(hasher, payload.len()); -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_update_voucher_hash() { - let destination = - hex::decode("5555555555555555555555555555555555555555").unwrap(); - let payload: Vec<u8> = "hello world".as_bytes().into(); - let hash = compute_voucher_hash(&destination, &payload); - let expected_hash = Hash::decode( - "61a61380d2a3b5e2b09a5ff259a2e1048da1989bdd6d6ecc69594cfbedc01278", - ); - assert_eq!(&hash, &expected_hash); - } - - #[test] - fn test_update_notice_hash() { - let payload: Vec<u8> = "hello world".as_bytes().into(); - let hash = compute_notice_hash(&payload); - let expected_hash = Hash::decode( - "d9f29a4e347ad89dc70490124ee6975fbc0693c7e72d6bc383673bfd0e8841f2", - ); - assert_eq!(&hash, &expected_hash); - } -}
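compute_voucher_hash writes out, by hand, the Solidity ABI encoding of (address, bytes): a left-padded destination, the fixed tail offset 0x40, the payload length, and the right-padded payload, all fed through keccak256. A sketch of the equivalent using the ethabi and sha3 crates already in this file's imports (the function name is illustrative):

    use ethabi::{ethereum_types::H160, Token};
    use sha3::{Digest, Keccak256};

    fn voucher_hash_via_ethabi(destination: [u8; 20], payload: Vec<u8>) -> [u8; 32] {
        // keccak256(abi.encode(destination, payload)); compute_notice_hash is the
        // same idea with a single bytes argument, hence its 0x20 offset.
        let encoded = ethabi::encode(&[
            Token::Address(H160(destination)),
            Token::Bytes(payload),
        ]);
        Keccak256::digest(&encoded).into()
    }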
diff --git a/offchain/host-runner/src/grpc/mod.rs b/offchain/host-runner/src/grpc/mod.rs deleted file mode 100644 index 82ec8fbef..000000000 --- a/offchain/host-runner/src/grpc/mod.rs +++ /dev/null @@ -1,47 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod server_manager; - -use futures_util::FutureExt; -use std::future::Future; -use tonic::transport::Server; -use tonic_health::pb::health_server::{Health, HealthServer}; - -use grpc_interfaces::cartesi_server_manager::server_manager_server::ServerManagerServer; -use server_manager::ServerManagerService; - -use crate::config::Config; -use crate::controller::Controller; - -/// Create the grpc healthcheck for the host-runner -/// -/// Since the host-runner doesn't rely on any other service to function, it is always -/// healthy. -async fn create_health_service() -> HealthServer<impl Health> { - let (mut health_reporter, health_service) = - tonic_health::server::health_reporter(); - health_reporter - .set_serving::<ServerManagerServer<ServerManagerService>>() - .await; - health_service -} - -pub async fn start_service<F: Future>( - config: &Config, - controller: Controller, - signal: F, -) -> Result<(), tonic::transport::Error> { - let addr = format!( - "{}:{}", - config.grpc_server_manager_address, config.grpc_server_manager_port - ) - .parse() - .expect("invalid config"); - let service = ServerManagerService::new(controller); - Server::builder() - .add_service(create_health_service().await) - .add_service(ServerManagerServer::new(service)) - .serve_with_shutdown(addr, signal.map(|_| ())) - .await -}
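start_service accepts any future as its shutdown signal; main.rs (deleted further below in this patch) drives it from a oneshot channel, roughly like this sketch (assuming `config` and `controller` are owned by, or cloned into, the spawned task):

    use futures_util::FutureExt; // for .map on the receiver
    let (shutdown_tx, shutdown_rx) = tokio::sync::oneshot::channel::<()>();
    let server = tokio::spawn(async move {
        grpc::start_service(&config, controller, shutdown_rx.map(|_| ())).await
    });
    // later, on teardown:
    let _ = shutdown_tx.send(());
    let _ = server.await;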
diff --git a/offchain/host-runner/src/grpc/server_manager.rs b/offchain/host-runner/src/grpc/server_manager.rs deleted file mode 100644 index 634c39d5c..000000000 --- a/offchain/host-runner/src/grpc/server_manager.rs +++ /dev/null @@ -1,1036 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::controller::Controller; -use crate::hash::{Hash, HASH_SIZE}; -use crate::merkle_tree::{ - complete::Tree, proof::Proof, Error as MerkleTreeError, -}; -use crate::model::{ - AdvanceMetadata, AdvanceResult, AdvanceStateRequest, CompletionStatus, - InspectStateRequest, InspectStatus, Notice, Report, Voucher, -}; -use crate::proofs::compute_proofs; -use ethabi::ethereum_types::U256; -use ethabi::Token; -use grpc_interfaces::cartesi_machine::{ - Hash as GrpcHash, MerkleTreeProof as GrpcMerkleTreeProof, Void, -}; -use grpc_interfaces::cartesi_server_manager::{ - processed_input::ProcessedInputOneOf, server_manager_server::ServerManager, - AcceptedData, Address, AdvanceStateRequest as GrpcAdvanceStateRequest, - CompletionStatus as GrpcCompletionStatus, - DeleteEpochRequest as GrpcDeleteEpochRequest, EndSessionRequest, - EpochState, FinishEpochRequest, FinishEpochResponse, GetEpochStatusRequest, - GetEpochStatusResponse, GetSessionStatusRequest, GetSessionStatusResponse, - GetStatusResponse, InspectStateRequest as GrpcInspectStateRequest, - InspectStateResponse, Notice as GrpcNotice, OutputEnum, - OutputValidityProof, ProcessedInput, Proof as GrpcProof, - Report as GrpcReport, StartSessionRequest, StartSessionResponse, - TaintStatus, Voucher as GrpcVoucher, -}; -use grpc_interfaces::versioning::{GetVersionResponse, SemanticVersion}; -use std::{collections::HashMap, sync::Arc}; -use tokio::sync::Mutex; -use tonic::{Request, Response, Status}; - -pub struct ServerManagerService { - controller: Controller, - sessions: SessionManager, -} - -impl ServerManagerService { - pub fn new(controller: Controller) -> Self { - Self { - controller, - sessions: SessionManager::new(), - } - } -} - -#[tonic::async_trait] -impl ServerManager for ServerManagerService { - async fn get_version( - &self, - _: Request<Void>, - ) -> Result<Response<GetVersionResponse>, Status> { - tracing::info!("received get_version"); - let response = GetVersionResponse { - version: Some(SemanticVersion { - major: 0, - minor: 2, - patch: 0, - pre_release: String::from(""), - build: String::from("host-runner"), - }), - }; - Ok(Response::new(response)) - } - - async fn start_session( - &self, - request: Request<StartSessionRequest>, - ) -> Result<Response<StartSessionResponse>, Status> { - let request = request.into_inner(); - tracing::info!("received start_session with id={}", request.session_id); - self.sessions - .try_set_session( - request.session_id, - request.active_epoch_index, - request.processed_input_count, - self.controller.clone(), - ) - .await?; - let response = StartSessionResponse { config: None }; - Ok(Response::new(response)) - } - - async fn end_session( - &self, - request: Request<EndSessionRequest>, - ) -> Result<Response<Void>, Status> { - let request = request.into_inner(); - tracing::info!("received end_session with id={}", request.session_id); - self.sessions.try_del_session(&request.session_id).await?; - Ok(Response::new(Void {})) - } - - async fn advance_state( - &self, - request: Request<GrpcAdvanceStateRequest>, - ) -> Result<Response<Void>, Status> { - let request = request.into_inner(); - tracing::info!("received advance_state with id={}", request.session_id); - let metadata = request - .input_metadata - .ok_or(Status::invalid_argument("missing metadata from request"))?; - let msg_sender = metadata - .msg_sender - .ok_or(Status::invalid_argument( - "missing msg_sender from metadata", - ))? - .data - .try_into() - .or(Err(Status::invalid_argument("invalid address")))?; - if metadata.epoch_index != 0 { - return Err(Status::invalid_argument( - "metadata epoch index is deprecated and should always be 0", - )); - } - if metadata.input_index != request.current_input_index { - return Err(Status::invalid_argument( - "metadata input index mismatch", - )); - } - let advance_request = AdvanceStateRequest { - metadata: AdvanceMetadata { - msg_sender, - epoch_index: metadata.epoch_index, - input_index: metadata.input_index, - block_number: metadata.block_number, - timestamp: metadata.timestamp, - }, - payload: request.input_payload, - }; - self.sessions - .try_get_session(&request.session_id) - .await? - .try_lock() - .or(Err(Status::aborted("concurrent call in session")))? - .try_advance( - request.active_epoch_index, - request.current_input_index, - advance_request, - ) - .await?; - Ok(Response::new(Void {})) - } - - async fn finish_epoch( - &self, - request: Request<FinishEpochRequest>, - ) -> Result<Response<FinishEpochResponse>, Status> { - let request = request.into_inner(); - tracing::info!("received finish_epoch with id={}", request.session_id); - if !request.storage_directory.is_empty() { - tracing::warn!("ignoring storage_directory parameter"); - } - let response = self - .sessions - .try_get_session(&request.session_id) - .await? - .try_lock() - .or(Err(Status::aborted("concurrent call in session")))? - .try_finish_epoch( - request.active_epoch_index, - request.processed_input_count_within_epoch, - ) - .await?; - Ok(Response::new(response)) - } - - async fn inspect_state( - &self, - request: Request<GrpcInspectStateRequest>, - ) -> Result<Response<InspectStateResponse>, Status> { - let request = request.into_inner(); - tracing::info!("received inspect_state with id={}", request.session_id); - self.sessions - .try_get_session(&request.session_id) - .await? - .try_lock() - .or(Err(Status::aborted("concurrent call in session")))? - .try_inspect(request.session_id, request.query_payload) - .await - .map(Response::new) - } - - async fn get_status( - &self, - _: Request<Void>, - ) -> Result<Response<GetStatusResponse>, Status> { - tracing::info!("received get_status"); - let session_id = self.sessions.get_sessions().await; - Ok(Response::new(GetStatusResponse { session_id })) - } - - async fn get_session_status( - &self, - request: Request<GetSessionStatusRequest>, - ) -> Result<Response<GetSessionStatusResponse>, Status> { - let request = request.into_inner(); - tracing::info!( - "received get_session_status with id={}", - request.session_id - ); - let response = self - .sessions - .try_get_session(&request.session_id) - .await? - .try_lock() - .or(Err(Status::aborted("concurrent call in session")))?
- .get_status(request.session_id) - .await; - Ok(Response::new(response)) - } - - async fn get_epoch_status( - &self, - request: Request<GetEpochStatusRequest>, - ) -> Result<Response<GetEpochStatusResponse>, Status> { - let request = request.into_inner(); - tracing::info!( - "received get_epoch_status with id={} and epoch_index={}", - request.session_id, - request.epoch_index - ); - let response = self - .sessions - .try_get_session(&request.session_id) - .await? - .try_lock() - .or(Err(Status::aborted("concurrent call in session")))? - .try_get_epoch_status(request.session_id, request.epoch_index) - .await?; - Ok(Response::new(response)) - } - - async fn delete_epoch( - &self, - request: Request<GrpcDeleteEpochRequest>, - ) -> Result<Response<Void>, Status> { - let request = request.into_inner(); - self.sessions - .try_get_session(&request.session_id) - .await? - .try_lock() - .or(Err(Status::aborted("concurrent call in session")))? - .try_delete_epoch(request.epoch_index) - .await?; - Ok(Response::new(Void {})) - } -} - -struct SessionManager { - entry: Mutex<Option<SessionEntry>>, -} - -impl SessionManager { - fn new() -> Self { - Self { - entry: Mutex::new(None), - } - } - - async fn try_set_session( - &self, - session_id: String, - active_epoch_index: u64, - processed_input_count: u64, - controller: Controller, - ) -> Result<(), Status> { - if session_id.is_empty() { - return Err(Status::invalid_argument("session id is empty")); - } - let mut entry = self.entry.lock().await; - match *entry { - Some(_) => { - tracing::warn!( - "the host-runner only supports a single session" - ); - Err(Status::already_exists("session id is taken")) - } - None => { - *entry = Some(SessionEntry::new( - session_id, - active_epoch_index, - processed_input_count, - controller, - )); - Ok(()) - } - } - } - - async fn try_get_session( - &self, - request_id: &String, - ) -> Result<Arc<Mutex<Session>>, Status> { - self.entry - .lock() - .await - .as_ref() - .and_then(|entry| entry.get_session(request_id)) - .ok_or(Status::invalid_argument("session id not found")) - } - - async fn try_del_session(&self, request_id: &String) -> Result<(), Status> { - self.try_get_session(request_id) - .await? - .try_lock() - .or(Err(Status::aborted("concurrent call in session")))? - .check_endable() - .await?; - let mut entry = self.entry.lock().await; - *entry = None; - Ok(()) - } - - async fn get_sessions(&self) -> Vec<String> { - let mut sessions = Vec::new(); - if let Some(entry) = self.entry.lock().await.as_ref() { - sessions.push(entry.get_id()); - } - sessions - } -} - -struct SessionEntry { - id: String, - session: Arc<Mutex<Session>>, -} - -impl SessionEntry { - fn new( - id: String, - active_epoch_index: u64, - processed_input_count: u64, - controller: Controller, - ) -> Self { - Self { - id, - session: Arc::new(Mutex::new(Session::new( - active_epoch_index, - processed_input_count, - controller, - ))), - } - } - - fn get_session(&self, request_id: &String) -> Option<Arc<Mutex<Session>>> { - if &self.id == request_id { - Some(self.session.clone()) - } else { - None - } - } - - fn get_id(&self) -> String { - self.id.clone() - } -} - -struct Session { - active_epoch_index: u64, - controller: Controller, - epochs: HashMap<u64, Arc<Mutex<Epoch>>>, - tainted: Arc<Mutex<Option<Status>>>, -} - -impl Session { - fn new( - active_epoch_index: u64, - processed_input_count: u64, - controller: Controller, - ) -> Self { - let epoch = Arc::new(Mutex::new(Epoch::new(processed_input_count))); - let mut epochs = HashMap::new(); - epochs.insert(active_epoch_index, epoch); - Self { - active_epoch_index, - controller, - epochs, - tainted: Arc::new(Mutex::new(None)), - } - } - - async fn try_advance( - &mut self, - active_epoch_index: u64, - current_input_index: u64, - advance_request: AdvanceStateRequest, - ) -> Result<(), Status> { - self.check_epoch_index_overflow()?; - self.check_tainted().await?; - self.check_active_epoch(active_epoch_index)?; - let epoch = self.try_get_epoch(active_epoch_index)?; - epoch - .lock() - .await - .try_add_pending_input(current_input_index)?; - let rx = self.controller.advance(advance_request).await; - let epoch = epoch.clone(); - let tainted = self.tainted.clone(); - // Handle the advance response in another thread - tokio::spawn(async move { - match rx.await { - Ok(result) => { - if let Err(e) = - epoch.lock().await.add_processed_input(result) - { - tracing::error!( - "failed to add processed input; tainting session" - ); - *tainted.lock().await = Some(e); - } - } - Err(_) => { - tracing::error!("sender dropped the channel"); - } - } - }); - Ok(()) - } - - async fn try_inspect( - &mut self, - session_id: String, - payload: Vec<u8>, - ) -> Result<InspectStateResponse, Status> { - self.check_tainted().await?; - let rx = self - .controller - .inspect(InspectStateRequest { payload }) - .await; - let result = rx.await.map_err(|e| { - tracing::error!("sender dropped the channel ({})", e); - Status::internal("unexpected error during inspect") - })?; - let active_epoch_index = self.active_epoch_index; - let epoch = self.try_get_epoch(active_epoch_index)?; - let processed_input_count = - epoch.lock().await.get_num_processed_inputs_since_genesis(); - Ok(InspectStateResponse { - session_id, - active_epoch_index, - processed_input_count, - status: (&result.status).into(), - exception_data: match result.status { - InspectStatus::Exception { exception } => { - Some(exception.payload) - } - _ => None, - }, - reports: result.reports.into_iter().map(GrpcReport::from).collect(), - }) - } - - async fn try_finish_epoch( - &mut self, - active_epoch_index: u64, - processed_input_count_within_epoch: u64, - ) -> Result<FinishEpochResponse, Status> { - self.check_epoch_index_overflow()?; - self.check_tainted().await?; - self.check_active_epoch(active_epoch_index)?; - let (response, processed_input_count_since_genesis) = { - let mut last_epoch = - self.try_get_epoch(active_epoch_index)?.lock().await; - (
last_epoch.try_finish( - processed_input_count_within_epoch, - active_epoch_index, - )?, - last_epoch.processed_input_count_since_genesis, - ) - }; - self.active_epoch_index += 1; - let epoch = Arc::new(Mutex::new(Epoch::new( - processed_input_count_since_genesis - + processed_input_count_within_epoch, - ))); - self.epochs.insert(self.active_epoch_index, epoch); - Ok(response) - } - - async fn try_delete_epoch( - &mut self, - epoch_index: u64, - ) -> Result<(), Status> { - self.check_tainted().await?; - self.try_get_epoch(epoch_index)? - .lock() - .await - .check_finished()?; - self.epochs.remove(&epoch_index); - Ok(()) - } - - async fn get_status(&self, session_id: String) -> GetSessionStatusResponse { - let mut epoch_index: Vec<u64> = self.epochs.keys().cloned().collect(); - epoch_index.sort(); - GetSessionStatusResponse { - session_id, - active_epoch_index: self.active_epoch_index, - epoch_index, - taint_status: self.get_taint_status().await, - } - } - - async fn get_taint_status(&self) -> Option<TaintStatus> { - self.tainted - .lock() - .await - .as_ref() - .map(|status| TaintStatus { - error_code: status.code() as i32, - error_message: String::from(status.message()), - }) - } - - async fn try_get_epoch_status( - &self, - session_id: String, - epoch_index: u64, - ) -> Result<GetEpochStatusResponse, Status> { - let taint_status = self.get_taint_status().await; - let response = self - .try_get_epoch(epoch_index)? - .lock() - .await - .get_status(session_id, epoch_index, taint_status); - Ok(response) - } - - fn try_get_epoch( - &self, - epoch_index: u64, - ) -> Result<&Arc<Mutex<Epoch>>, Status> { - self.epochs - .get(&epoch_index) - .ok_or(Status::invalid_argument("unknown epoch index")) - } - - async fn check_endable(&self) -> Result<(), Status> { - if self.tainted.lock().await.is_none() { - self.try_get_epoch(self.active_epoch_index)? - .lock() - .await - .check_endable()?; - } - Ok(()) - } - - async fn check_tainted(&self) -> Result<(), Status> { - if self.tainted.lock().await.is_some() { - Err(Status::data_loss("session is tainted")) - } else { - Ok(()) - } - } - - fn check_epoch_index_overflow(&self) -> Result<(), Status> { - if self.active_epoch_index == std::u64::MAX { - Err(Status::out_of_range("active epoch index will overflow")) - } else { - Ok(()) - } - } - - fn check_active_epoch( - &self, - active_epoch_index: u64, - ) -> Result<(), Status> { - if self.active_epoch_index != active_epoch_index { - Err(Status::invalid_argument(format!( - "incorrect active epoch index (expected {}, got {})", - self.active_epoch_index, active_epoch_index - ))) - } else { - Ok(()) - } - } -} - -/// The keccak output has 32 bytes -const LOG2_KECCAK_SIZE: usize = 5; - -/// The epoch tree has 2^32 leaves -const LOG2_ROOT_SIZE: usize = 32 + LOG2_KECCAK_SIZE; - -/// The max number of inputs in an epoch is limited by the size of the merkle tree -const MAX_INPUTS_IN_EPOCH: usize = 1 << (LOG2_ROOT_SIZE - LOG2_KECCAK_SIZE); - -#[derive(Debug)] -struct Epoch { - state: EpochState, - pending_inputs: u64, - processed_inputs: Vec<AdvanceResult>, - vouchers_tree: Tree, - notices_tree: Tree, - processed_input_count_since_genesis: u64, -} - -impl Epoch { - fn new(processed_input_count_since_genesis: u64) -> Self { - Self { - state: EpochState::Active, - pending_inputs: 0, - processed_inputs: vec![], - vouchers_tree: Tree::new( - LOG2_ROOT_SIZE, - LOG2_KECCAK_SIZE, - LOG2_KECCAK_SIZE, - ) - .expect("cannot fail"), - notices_tree: Tree::new( - LOG2_ROOT_SIZE, - LOG2_KECCAK_SIZE, - LOG2_KECCAK_SIZE, - ) - .expect("cannot fail"), - processed_input_count_since_genesis, - } - } - - fn try_add_pending_input( - &mut self, - current_input_index: u64, - ) -> Result<(), Status> { - self.check_active()?; - self.check_current_input_index(current_input_index)?; - self.check_input_limit()?; - self.pending_inputs += 1; - Ok(()) - } - - fn add_processed_input( - &mut self, - mut result: AdvanceResult, - ) -> Result<(), Status> { - // Compute proofs and update vouchers and notices trees - if let CompletionStatus::Accepted { vouchers, notices } = - &mut result.status - { - let voucher_root = compute_proofs(vouchers)?; - result.voucher_root = Some(voucher_root.clone()); - self.vouchers_tree.push(voucher_root)?; - let notice_root = compute_proofs(notices)?; - result.notice_root = Some(notice_root.clone()); - self.notices_tree.push(notice_root)?; - } else { - self.vouchers_tree.push(Hash::default())?; - self.notices_tree.push(Hash::default())?; - } - // Setup proofs for the current result - let address = (self.vouchers_tree.len() - 1) << LOG2_KECCAK_SIZE; - result.voucher_hashes_in_epoch = - Some(self.vouchers_tree.get_proof(address, LOG2_KECCAK_SIZE)?); - result.notice_hashes_in_epoch = - Some(self.notices_tree.get_proof(address, LOG2_KECCAK_SIZE)?); - // Add result to processed inputs - self.pending_inputs -= 1; - self.processed_inputs.push(result); - Ok(()) - } - - fn try_finish( - &mut self, - processed_input_count_within_epoch: u64, - epoch_index: u64, - ) -> Result<FinishEpochResponse, Status> { - self.check_active()?; - self.check_pending_inputs()?; - self.check_processed_inputs(processed_input_count_within_epoch)?; - self.state = EpochState::Finished; - - let machine_state_hash = GrpcHash { - data: vec![0_u8; HASH_SIZE], - }; - let mut proofs: Vec<GrpcProof> = vec![]; - let index = Token::Int(U256::from(epoch_index)); - let context = ethabi::encode(&[index]); - - for (local_input_index, result) in
self.processed_inputs.iter_mut().enumerate() - { - let address = local_input_index << LOG2_KECCAK_SIZE; - let voucher_hashes_in_epoch = - self.vouchers_tree.get_proof(address, LOG2_KECCAK_SIZE)?; - let notice_hashes_in_epoch = - self.notices_tree.get_proof(address, LOG2_KECCAK_SIZE)?; - let global_input_index = self.processed_input_count_since_genesis - + local_input_index as u64; - - if let CompletionStatus::Accepted { vouchers, notices } = - &mut result.status - { - // Create GrpcProof for each voucher - for (output_index, voucher) in vouchers.iter().enumerate() { - proofs.push(GrpcProof { - input_index: global_input_index, - output_index: output_index as u64, - output_enum: OutputEnum::Voucher.into(), - // Create OutputValidityProof for each voucher - validity: Some(OutputValidityProof { - input_index_within_epoch: local_input_index as u64, - output_index_within_input: output_index as u64, - output_hashes_root_hash: Some(GrpcHash::from( - result.voucher_root.clone().expect( - "expected voucher's root hash to exist", - ), - )), - vouchers_epoch_root_hash: Some(GrpcHash::from( - self.vouchers_tree.get_root_hash().clone(), - )), - notices_epoch_root_hash: Some(GrpcHash::from( - self.notices_tree.get_root_hash().clone(), - )), - machine_state_hash: Some( - machine_state_hash.clone(), - ), - output_hash_in_output_hashes_siblings: voucher - .keccak_in_voucher_hashes - .clone() - .expect("expected voucher proof to exist") - .sibling_hashes - .into_iter() - .map(GrpcHash::from) - .collect(), - output_hashes_in_epoch_siblings: - voucher_hashes_in_epoch - .clone() - .sibling_hashes - .into_iter() - .map(GrpcHash::from) - .collect(), - }), - context: context.clone(), - }) - } - // Create GrpcProof for each notice - for (output_index, notice) in notices.iter().enumerate() { - proofs.push(GrpcProof { - input_index: global_input_index, - output_index: output_index as u64, - output_enum: OutputEnum::Notice.into(), - // Create OutputValidityProof for each notice - validity: Some(OutputValidityProof { - input_index_within_epoch: local_input_index as u64, - output_index_within_input: output_index as u64, - output_hashes_root_hash: Some(GrpcHash::from( - result.notice_root.clone().expect( - "expected notice's root hash to exist", - ), - )), - vouchers_epoch_root_hash: Some(GrpcHash::from( - self.vouchers_tree.get_root_hash().clone(), - )), - notices_epoch_root_hash: Some(GrpcHash::from( - self.notices_tree.get_root_hash().clone(), - )), - machine_state_hash: Some( - machine_state_hash.clone(), - ), - output_hash_in_output_hashes_siblings: notice - .keccak_in_notice_hashes - .clone() - .expect("expected notice proof to exist") - .sibling_hashes - .into_iter() - .map(GrpcHash::from) - .collect(), - output_hashes_in_epoch_siblings: - notice_hashes_in_epoch - .clone() - .sibling_hashes - .into_iter() - .map(GrpcHash::from) - .collect(), - }), - context: context.clone(), - }) - } - } - } - - Ok(FinishEpochResponse { - machine_hash: Some(machine_state_hash.clone()), - vouchers_epoch_root_hash: Some(GrpcHash::from( - self.vouchers_tree.get_root_hash().clone(), - )), - notices_epoch_root_hash: Some(GrpcHash::from( - self.notices_tree.get_root_hash().clone(), - )), - proofs, - }) - } - - fn get_status( - &self, - session_id: String, - epoch_index: u64, - taint_status: Option, - ) -> GetEpochStatusResponse { - let processed_inputs = self - .processed_inputs - .iter() - .cloned() - .enumerate() - .map(|(local_input_index, input)| { - let input_index = local_input_index as u64 - + 
self.processed_input_count_since_genesis; - ProcessedInput { - input_index, - status: (&input.status).into(), - processed_input_one_of: input.status.into(), - reports: input - .reports - .into_iter() - .map(GrpcReport::from) - .collect(), - } - }) - .collect(); - GetEpochStatusResponse { - session_id, - epoch_index, - state: self.state as i32, - processed_inputs, - pending_input_count: self.pending_inputs, - taint_status, - } - } - - fn get_num_processed_inputs_within_epoch(&self) -> u64 { - self.processed_inputs.len() as u64 - } - - fn get_num_processed_inputs_since_genesis(&self) -> u64 { - self.get_num_processed_inputs_within_epoch() - + self.processed_input_count_since_genesis - } - - fn get_current_input_index(&self) -> u64 { - self.pending_inputs + self.get_num_processed_inputs_since_genesis() - } - - fn check_endable(&self) -> Result<(), Status> { - self.check_pending_inputs()?; - self.check_no_processed_inputs()?; - Ok(()) - } - - fn check_active(&self) -> Result<(), Status> { - if self.state != EpochState::Active { - Err(Status::invalid_argument("epoch is finished")) - } else { - Ok(()) - } - } - - fn check_finished(&self) -> Result<(), Status> { - match self.check_active() { - Ok(_) => Err(Status::invalid_argument("epoch is not finished")), - Err(_) => Ok(()), - } - } - - fn check_current_input_index( - &self, - current_input_index: u64, - ) -> Result<(), Status> { - let epoch_current_input_index = self.get_current_input_index(); - if epoch_current_input_index != current_input_index { - Err(Status::invalid_argument(format!( - "incorrect current input index (expected {}, got {})", - epoch_current_input_index, current_input_index - ))) - } else { - Ok(()) - } - } - - fn check_pending_inputs(&self) -> Result<(), Status> { - if self.pending_inputs != 0 { - Err(Status::invalid_argument("epoch still has pending inputs")) - } else { - Ok(()) - } - } - - fn check_processed_inputs( - &self, - processed_input_count_within_epoch: u64, - ) -> Result<(), Status> { - if self.get_num_processed_inputs_within_epoch() - != processed_input_count_within_epoch - { - Err(Status::invalid_argument(format!( - "incorrect processed input count (expected {}, got {})", - self.get_num_processed_inputs_within_epoch(), - processed_input_count_within_epoch - ))) - } else { - Ok(()) - } - } - - fn check_no_processed_inputs(&self) -> Result<(), Status> { - if self.get_num_processed_inputs_within_epoch() != 0 { - Err(Status::invalid_argument("epoch still has processed inputs")) - } else { - Ok(()) - } - } - - fn check_input_limit(&self) -> Result<(), Status> { - if self.pending_inputs - + self.get_num_processed_inputs_within_epoch() - + 1 - >= MAX_INPUTS_IN_EPOCH as u64 - { - Err(Status::invalid_argument( - "reached max number of inputs per epoch", - )) - } else { - Ok(()) - } - } -} - -impl From<&CompletionStatus> for i32 { - fn from(status: &CompletionStatus) -> i32 { - let status = match status { - CompletionStatus::Accepted { .. } => GrpcCompletionStatus::Accepted, - CompletionStatus::Rejected => GrpcCompletionStatus::Rejected, - CompletionStatus::Exception { .. } => { - GrpcCompletionStatus::Exception - } - }; - status as i32 - } -} - -impl From<&InspectStatus> for i32 { - fn from(status: &InspectStatus) -> i32 { - let status = match status { - InspectStatus::Accepted => GrpcCompletionStatus::Accepted, - InspectStatus::Rejected => GrpcCompletionStatus::Rejected, - InspectStatus::Exception { .. 
} => GrpcCompletionStatus::Exception, - }; - status as i32 - } -} - -impl From<CompletionStatus> for Option<ProcessedInputOneOf> { - fn from(status: CompletionStatus) -> Option<ProcessedInputOneOf> { - match status { - CompletionStatus::Accepted { vouchers, notices } => { - Some(ProcessedInputOneOf::AcceptedData(AcceptedData { - vouchers: vouchers - .into_iter() - .map(GrpcVoucher::from) - .collect(), - notices: notices - .into_iter() - .map(GrpcNotice::from) - .collect(), - })) - } - CompletionStatus::Rejected => None, - CompletionStatus::Exception { exception } => { - Some(ProcessedInputOneOf::ExceptionData(exception.payload)) - } - } - } -} - -impl From<Voucher> for GrpcVoucher { - fn from(voucher: Voucher) -> GrpcVoucher { - GrpcVoucher { - destination: Some(Address { - data: voucher.destination.into(), - }), - payload: voucher.payload, - } - } -} - -impl From<Notice> for GrpcNotice { - fn from(notice: Notice) -> GrpcNotice { - GrpcNotice { - payload: notice.payload, - } - } -} - -impl From<Report> for GrpcReport { - fn from(report: Report) -> GrpcReport { - GrpcReport { - payload: report.payload, - } - } -} - -impl From<Hash> for GrpcHash { - fn from(hash: Hash) -> GrpcHash { - GrpcHash { data: hash.into() } - } -} - -impl From<Proof> for GrpcMerkleTreeProof { - fn from(proof: Proof) -> GrpcMerkleTreeProof { - GrpcMerkleTreeProof { - target_address: proof.target_address as u64, - log2_target_size: proof.log2_target_size as u64, - target_hash: Some(proof.target_hash.into()), - log2_root_size: proof.log2_root_size as u64, - root_hash: Some(proof.root_hash.into()), - sibling_hashes: proof - .sibling_hashes - .into_iter() - .map(GrpcHash::from) - .collect(), - } - } -} - -impl From<MerkleTreeError> for Status { - fn from(e: MerkleTreeError) -> Status { - Status::internal(format!( - "unexpected error when updating merkle tree ({})", - e - )) - } -} diff --git a/offchain/host-runner/src/hash.rs b/offchain/host-runner/src/hash.rs deleted file mode 100644 index ed5706ecc..000000000 --- a/offchain/host-runner/src/hash.rs +++ /dev/null @@ -1,72 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub use sha3::Digest; -use sha3::{digest::Output, Keccak256}; - -pub const HASH_SIZE: usize = 32; - -pub type Hasher = Keccak256; - -#[derive(Clone, PartialEq, Eq)] -pub struct Hash { - data: [u8; HASH_SIZE], -} - -impl Hash { - pub fn data(&self) -> &[u8; HASH_SIZE] { - &self.data - } - - #[cfg(test)] - pub fn decode(s: &str) -> Hash { - Hash { - data: hex::decode(&s) - .expect("invalid hex string") - .try_into() - .expect("cannot fail"), - } - } -} - -impl Default for Hash { - fn default() -> Self { - Self { - data: [0; HASH_SIZE], - } - } -} - -impl From<Output<Hasher>> for Hash { - fn from(arr: Output<Hasher>) -> Hash { - Hash { data: arr.into() } - } -} - -impl From<[u8; HASH_SIZE]> for Hash { - fn from(data: [u8; HASH_SIZE]) -> Hash { - Hash { data } - } -} - -impl TryFrom<Vec<u8>> for Hash { - type Error = Vec<u8>; - - fn try_from(v: Vec<u8>) -> Result<Hash, Vec<u8>> { - Ok(Hash { - data: v.try_into()?, - }) - } -} - -impl From<Hash> for Vec<u8> { - fn from(hash: Hash) -> Vec<u8> { - Vec::from(hash.data) - } -} - -impl std::fmt::Debug for Hash { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", hex::encode(self.data)) - } -}
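The Hash newtype above exists mostly for these conversions; composed, they look like the following sketch (using the deleted module's names):

    use sha3::{Digest, Keccak256};

    fn hash_round_trip() {
        let h: Hash = Keccak256::digest(b"input").into(); // From<Output<Hasher>>
        let bytes: Vec<u8> = h.clone().into();            // From<Hash> for Vec<u8>
        // TryFrom<Vec<u8>> hands the vector back as the error unless it has
        // exactly HASH_SIZE (32) bytes.
        let h2 = Hash::try_from(bytes).expect("32 bytes");
        assert_eq!(h, h2);
    }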
diff --git a/offchain/host-runner/src/http/errors.rs b/offchain/host-runner/src/http/errors.rs deleted file mode 100644 index 79143cb53..000000000 --- a/offchain/host-runner/src/http/errors.rs +++ /dev/null @@ -1,40 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use actix_web::{error, error::Error}; - -use crate::controller::ControllerError; -use crate::conversions::DecodeError; -use crate::model::RollupException; - -use super::model::{DecodeStatusError, VoucherDecodeError}; - -impl From<RollupException> for Error { - fn from(e: RollupException) -> Error { - error::ErrorInternalServerError(e.to_string()) - } -} - -impl From<ControllerError> for Error { - fn from(e: ControllerError) -> Error { - error::ErrorBadRequest(e.to_string()) - } -} - -impl From<DecodeError> for Error { - fn from(e: DecodeError) -> Error { - error::ErrorBadRequest(e.to_string()) - } -} - -impl From<VoucherDecodeError> for Error { - fn from(e: VoucherDecodeError) -> Error { - error::ErrorBadRequest(e.to_string()) - } -} - -impl From<DecodeStatusError> for Error { - fn from(e: DecodeStatusError) -> Error { - error::ErrorBadRequest(e.to_string()) - } -} diff --git a/offchain/host-runner/src/http/mod.rs b/offchain/host-runner/src/http/mod.rs deleted file mode 100644 index fc14244b0..000000000 --- a/offchain/host-runner/src/http/mod.rs +++ /dev/null @@ -1,17 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub mod errors; -pub mod model; -mod rollup_server; - -use crate::config::Config; -use crate::controller::Controller; - -/// Set up the HTTP server that receives requests from the DApp backend -pub async fn start_services( - config: &Config, - controller: Controller, -) -> std::io::Result<()> { - rollup_server::start_service(config, controller.clone()).await -} diff --git a/offchain/host-runner/src/http/model.rs b/offchain/host-runner/src/http/model.rs deleted file mode 100644 index 3cd67fc6d..000000000 --- a/offchain/host-runner/src/http/model.rs +++ /dev/null @@ -1,204 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use serde::{Deserialize, Serialize}; -use snafu::Snafu; - -use crate::conversions::{self, DecodeError}; -use crate::model::*; - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpAdvanceMetadata { - pub msg_sender: String, - pub epoch_index: u64, - pub input_index: u64, - pub block_number: u64, - pub timestamp: u64, -} - -impl From<AdvanceMetadata> for HttpAdvanceMetadata { - fn from(metadata: AdvanceMetadata) -> HttpAdvanceMetadata { - HttpAdvanceMetadata { - msg_sender: conversions::encode_ethereum_binary( - &metadata.msg_sender, - ), - epoch_index: metadata.epoch_index, - input_index: metadata.input_index, - block_number: metadata.block_number, - timestamp: metadata.timestamp, - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpAdvanceStateRequest { - pub metadata: HttpAdvanceMetadata, - pub payload: String, -} - -impl From<AdvanceStateRequest> for HttpAdvanceStateRequest { - fn from(request: AdvanceStateRequest) -> HttpAdvanceStateRequest { - HttpAdvanceStateRequest { - metadata: request.metadata.into(), - payload: conversions::encode_ethereum_binary(&request.payload), - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpInspectStateRequest { - pub payload: String, -} - -impl From<InspectStateRequest> for HttpInspectStateRequest { - fn from(request: InspectStateRequest) -> HttpInspectStateRequest { - HttpInspectStateRequest { - payload: conversions::encode_ethereum_binary(&request.payload), - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "request_type")] -pub enum HttpRollupRequest { - #[serde(rename = "advance_state")] - AdvanceState { data: HttpAdvanceStateRequest }, - #[serde(rename = "inspect_state")] - InspectState { data: HttpInspectStateRequest }, -}
impl From<RollupRequest> for HttpRollupRequest { - fn from(request: RollupRequest) -> HttpRollupRequest { - match request { - RollupRequest::AdvanceState(request) => { - HttpRollupRequest::AdvanceState { - data: request.into(), - } - } - RollupRequest::InspectState(request) => { - HttpRollupRequest::InspectState { - data: request.into(), - } - } - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpVoucher { - pub destination: String, - pub payload: String, -} - -impl TryFrom<HttpVoucher> for Voucher { - type Error = VoucherDecodeError; - fn try_from(voucher: HttpVoucher) -> Result<Voucher, VoucherDecodeError> { - Ok(Voucher::new( - conversions::decode_ethereum_binary(&voucher.destination)? - .try_into()?, - conversions::decode_ethereum_binary(&voucher.payload)?, - )) - } -} - -#[derive(Debug, Snafu)] -pub enum VoucherDecodeError { - #[snafu(display( - "Invalid Ethereum address size (got {} bytes, expected 20 bytes)", - got - ))] - InvalidAddressSize { got: usize }, - #[snafu(display("{}", e))] - HexDecodeError { e: DecodeError }, -} - -impl From<DecodeError> for VoucherDecodeError { - fn from(e: DecodeError) -> VoucherDecodeError { - VoucherDecodeError::HexDecodeError { e } - } -} - -impl From<Vec<u8>> for VoucherDecodeError { - fn from(bytes: Vec<u8>) -> VoucherDecodeError { - VoucherDecodeError::InvalidAddressSize { got: bytes.len() } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpNotice { - pub payload: String, -} - -impl TryFrom<HttpNotice> for Notice { - type Error = DecodeError; - fn try_from(notice: HttpNotice) -> Result<Notice, DecodeError> { - Ok(Notice::new(conversions::decode_ethereum_binary( - &notice.payload, - )?)) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpReport { - pub payload: String, -} - -impl From<Report> for HttpReport { - fn from(report: Report) -> HttpReport { - HttpReport { - payload: conversions::encode_ethereum_binary(&report.payload), - } - } -} - -impl TryFrom<HttpReport> for Report { - type Error = DecodeError; - fn try_from(report: HttpReport) -> Result<Report, DecodeError> { - Ok(Report { - payload: conversions::decode_ethereum_binary(&report.payload)?, - }) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpRollupException { - pub payload: String, -} - -impl TryFrom<HttpRollupException> for RollupException { - type Error = DecodeError; - fn try_from( - report: HttpRollupException, - ) -> Result<RollupException, DecodeError> { - Ok(RollupException { - payload: conversions::decode_ethereum_binary(&report.payload)?, - }) - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpFinishRequest { - pub status: String, -} - -impl TryFrom<HttpFinishRequest> for FinishStatus { - type Error = DecodeStatusError; - fn try_from( - request: HttpFinishRequest, - ) -> Result<FinishStatus, DecodeStatusError> { - match request.status.as_str() { - "accept" => Ok(FinishStatus::Accept), - "reject" => Ok(FinishStatus::Reject), - _ => Err(DecodeStatusError {}), - } - } -} - -#[derive(Debug, Serialize, Deserialize)] -pub struct HttpIndexResponse { - pub index: u64, -} - -#[derive(Debug, Snafu)] -#[snafu(display("status must be 'accept' or 'reject'"))] -pub struct DecodeStatusError {}
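These models fix the JSON wire format of the rollup HTTP API; a /voucher request body, for instance, decodes in two steps, serde first and then the 0x-hex conversions (a sketch, assuming serde_json; the function name is illustrative):

    fn parse_voucher(body: &str) -> Result<Voucher, Box<dyn std::error::Error>> {
        let http: HttpVoucher = serde_json::from_str(body)?;
        // TryFrom<HttpVoucher> decodes both 0x-prefixed fields and checks that
        // the destination is exactly 20 bytes.
        Ok(http.try_into()?)
    }

    // e.g. parse_voucher(r#"{"destination":"0x5555555555555555555555555555555555555555","payload":"0xdeadbeef"}"#)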
diff --git a/offchain/host-runner/src/http/rollup_server.rs b/offchain/host-runner/src/http/rollup_server.rs deleted file mode 100644 index 2b85877c1..000000000 --- a/offchain/host-runner/src/http/rollup_server.rs +++ /dev/null @@ -1,127 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use actix_web::{ - error, error::Result as HttpResult, middleware::Logger, web::Data, - web::Json, App, HttpResponse, HttpServer, Responder, -}; - -use crate::config::Config; -use crate::controller::{Controller, ControllerError}; -use crate::model::{FinishStatus, Notice, Report, RollupException, Voucher}; - -use super::model::{ - HttpFinishRequest, HttpIndexResponse, HttpNotice, HttpReport, - HttpRollupException, HttpRollupRequest, HttpVoucher, -}; - -pub async fn start_service( - config: &Config, - controller: Controller, -) -> std::io::Result<()> { - HttpServer::new(move || { - App::new() - .app_data(Data::new(controller.clone())) - .wrap(Logger::default()) - .service(voucher) - .service(notice) - .service(report) - .service(exception) - .service(finish) - }) - .bind(( - config.http_rollup_server_address.as_str(), - config.http_rollup_server_port, - ))? - .run() - .await -} - -#[actix_web::post("/voucher")] -async fn voucher( - voucher: Json<HttpVoucher>, - controller: Data<Controller>, -) -> HttpResult<impl Responder> { - let voucher: Voucher = voucher.into_inner().try_into()?; - let rx = controller.insert_voucher(voucher).await; - let index = rx.await.map_err(|_| { - tracing::error!("sender dropped the channel"); - error::ErrorInternalServerError("failed to insert voucher") - })??; - let response = HttpIndexResponse { - index: index as u64, - }; - Ok(HttpResponse::Ok().json(response)) -} - -#[actix_web::post("/notice")] -async fn notice( - notice: Json<HttpNotice>, - controller: Data<Controller>, -) -> HttpResult<impl Responder> { - let notice: Notice = notice.into_inner().try_into()?; - let rx = controller.insert_notice(notice).await; - let index = rx.await.map_err(|_| { - tracing::error!("sender dropped the channel"); - error::ErrorInternalServerError("failed to insert notice") - })??; - let response = HttpIndexResponse { - index: index as u64, - }; - Ok(HttpResponse::Ok().json(response)) -} - -#[actix_web::post("/report")] -async fn report( - report: Json<HttpReport>, - controller: Data<Controller>, -) -> HttpResult<impl Responder> { - let report: Report = report.into_inner().try_into()?; - let rx = controller.insert_report(report).await; - rx.await.map_err(|_| { - tracing::error!("sender dropped the channel"); - error::ErrorInternalServerError("failed to insert report") - })??; - Ok(HttpResponse::Ok()) -} - -#[actix_web::post("/exception")] -async fn exception( - exception: Json<HttpRollupException>, - controller: Data<Controller>, -) -> HttpResult<impl Responder> { - let exception: RollupException = exception.into_inner().try_into()?; - let rx = controller.notify_exception(exception).await; - rx.await.map_err(|_| { - tracing::error!("sender dropped the channel"); - error::ErrorInternalServerError("failed to notify exception") - })??; - Ok(HttpResponse::Ok()) -} - -#[actix_web::post("/finish")] -async fn finish( - body: Json<HttpFinishRequest>, - controller: Data<Controller>, -) -> HttpResult<impl Responder> { - let status: FinishStatus = body.into_inner().try_into()?; - let rx = controller.finish(status).await; - let result = rx.await.map_err(|_| { - tracing::error!("sender dropped the channel"); - error::ErrorInternalServerError("failed to finish") - })?; - let response = match result { - Ok(rollup_request) => { - HttpResponse::Ok().json(HttpRollupRequest::from(rollup_request)) - } - Err(e) => match e { - ControllerError::FetchRequestTimeout => { - HttpResponse::Accepted().body(e.to_string()) - } - ControllerError::InvalidRequest { ..
} => { - HttpResponse::BadRequest().body(e.to_string()) - } - }, - }; - Ok(response) -} diff --git a/offchain/host-runner/src/main.rs b/offchain/host-runner/src/main.rs deleted file mode 100644 index cefeb5841..000000000 --- a/offchain/host-runner/src/main.rs +++ /dev/null @@ -1,84 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod config; -mod controller; -mod conversions; -mod driver; -mod grpc; -mod hash; -mod http; -mod merkle_tree; -mod model; -mod proofs; - -use futures_util::FutureExt; -use std::sync::{atomic::AtomicBool, atomic::Ordering, Arc}; -use std::time::Duration; -use tokio::sync::oneshot; - -use clap::Parser; -use config::{CLIConfig, Config}; -use controller::Controller; - -fn log_result(name: &str, result: Result) { - let prefix = format!("http {} terminated ", name); - match result { - Ok(_) => tracing::info!("{} successfully", prefix), - Err(e) => tracing::warn!("{} with error: {}", prefix, e), - }; -} - -#[actix_web::main] -async fn main() { - let config: Config = CLIConfig::parse().into(); - - log::configure(&config.log_config); - - log::log_service_start(&config, "Host Runner"); - - let controller = - Controller::new(Duration::from_millis(config.finish_timeout)); - let http_service_running = Arc::new(AtomicBool::new(true)); - let (grpc_shutdown_tx, grpc_shutdown_rx) = oneshot::channel::<()>(); - let grpc_service = { - let controller = controller.clone(); - let config = config.clone(); - let shutdown = grpc_shutdown_rx.map(|_| ()); - let http_service_running = http_service_running.clone(); - tokio::spawn(async move { - log_result( - "gRPC service", - grpc::start_service(&config, controller.clone(), shutdown) - .await, - ); - if http_service_running.load(Ordering::Relaxed) { - panic!("gRPC service terminated before shutdown signal"); - } - }) - }; - - // We run the actix-web in the main thread because it handles the SIGINT - let host_runner_handle = http::start_services(&config, controller.clone()); - let health_handle = http_health_check::start(config.healthcheck_port); - tokio::select! { - result = health_handle => { - log_result("http health check", result); - } - result = host_runner_handle => { - log_result("http service", result); - } - } - http_service_running.store(false, Ordering::Relaxed); - - // Shutdown the other services - if let Err(e) = controller.shutdown().await.await { - tracing::error!("failed to shutdown controller ({})", e); - } - if grpc_shutdown_tx.send(()).is_err() { - tracing::error!("failed to send the shutdown signal to grpc"); - } - if let Err(e) = grpc_service.await { - tracing::error!("failed to shutdown the grpc service ({})", e); - } -} diff --git a/offchain/host-runner/src/merkle_tree/complete.rs b/offchain/host-runner/src/merkle_tree/complete.rs deleted file mode 100644 index fde9ae629..000000000 --- a/offchain/host-runner/src/merkle_tree/complete.rs +++ /dev/null @@ -1,476 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -//! 
Complete merkle tree based on Cartesi machine-emulator implementation - -use super::{ - get_concat_hash, pristine, proof::Proof, Error, - LeafSizeGreaterThanRootSizeSnafu, MisalignedAddressSnafu, - SizeOutOfRangeSnafu, TooManyLeavesSnafu, TreeIsFullSnafu, - TreeTooLargeSnafu, WordSizeGreaterThanLeafSizeSnafu, -}; -use crate::hash::{Digest, Hash, Hasher}; - -/// Complete merkle tree -/// -/// A merkle tree with any number of non-pristine leaves followed by a number of pristine leaves. -/// The tree is optimized to store only the hashes that are not pristine. -#[derive(Debug)] -pub struct Tree { - log2_root_size: usize, - log2_leaf_size: usize, - pristine: pristine::Tree, - tree: Vec<Level>, -} - -impl Tree { - /// Create a new complete merkle tree - /// - /// - `log2_root_size`: Log2 of the size in bytes of the whole merkle tree. - /// - `log2_leaf_size`: Log2 of the size in bytes of a single leaf. - /// - `log2_word_size`: Log2 of the size in bytes of a single word. This is used to compute the - /// pristine hash of a leaf. - pub fn new( - log2_root_size: usize, - log2_leaf_size: usize, - log2_word_size: usize, - ) -> Result<Tree, Error> { - snafu::ensure!( - log2_leaf_size <= log2_root_size, - LeafSizeGreaterThanRootSizeSnafu - ); - snafu::ensure!( - log2_word_size <= log2_leaf_size, - WordSizeGreaterThanLeafSizeSnafu - ); - snafu::ensure!( - log2_root_size <= std::mem::size_of::<usize>() * 8, - TreeTooLargeSnafu - ); - Ok(Self { - log2_root_size, - log2_leaf_size, - pristine: pristine::Tree::new(log2_root_size, log2_word_size)?, - tree: vec![vec![]; log2_root_size - log2_leaf_size + 1], - }) - } - - /// Create a new complete merkle tree from non-pristine leaves - /// - /// - `leaves`: Array with non-pristine hash leaves bound to the left side of the tree. - /// - /// For more information regarding the other parameters, see Tree::new(). - pub fn new_from_leaves( - log2_root_size: usize, - log2_leaf_size: usize, - log2_word_size: usize, - leaves: Level, - ) -> Result<Tree, Error> { - let max_len = 1 << (log2_root_size - log2_leaf_size); - snafu::ensure!(leaves.len() <= max_len, TooManyLeavesSnafu); - let mut tree = - Self::new(log2_root_size, log2_leaf_size, log2_word_size)?; - let level = tree.get_level_mut(log2_leaf_size).expect("cannot fail"); - *level = leaves; - tree.bubble_up(); - Ok(tree) - } - - /// Return the tree's root hash - pub fn get_root_hash(&self) -> &Hash { - self.get_node_hash(0, self.log2_root_size) - .expect("cannot fail") - } - - /// Return proof for a given node - /// - /// - `address`: The address is represented by the node index at the level shifted by - /// `log2_size`. - /// - `log2_size`: Log2 of the size in bytes of the subtree.
- pub fn get_proof( - &self, - address: usize, - log2_size: usize, - ) -> Result { - snafu::ensure!( - log2_size >= self.log2_leaf_size - && log2_size <= self.log2_root_size, - SizeOutOfRangeSnafu - ); - let aligned_address = (address >> log2_size) << log2_size; - snafu::ensure!(address == aligned_address, MisalignedAddressSnafu); - let target_hash = self.get_node_hash(address, log2_size)?.clone(); - let log2_root_size = self.log2_root_size; - let root_hash = self.get_root_hash().clone(); - let mut proof = Proof::new( - address, - log2_size, - target_hash, - log2_root_size, - root_hash, - )?; - for log2_sibling_size in log2_size..log2_root_size { - let sibling_address = address ^ (1 << log2_sibling_size); - let hash = - self.get_node_hash(sibling_address, log2_sibling_size)?; - proof.set_sibling_hash(hash.clone(), log2_sibling_size)?; - } - Ok(proof) - } - - /// Append a new leaf hash to the tree - /// - /// - `leaf`: Hash to append. - pub fn push(&mut self, leaf: Hash) -> Result<(), Error> { - let max_len = 1 << (self.log2_root_size - self.log2_leaf_size); - let leaves = self - .get_level_mut(self.log2_leaf_size) - .expect("cannot fail"); - snafu::ensure!(leaves.len() < max_len, TreeIsFullSnafu); - leaves.push(leaf); - self.bubble_up(); - Ok(()) - } - - /// Return the number of leafs - pub fn len(&self) -> usize { - self.get_level(self.log2_leaf_size) - .expect("cannot fail") - .len() - } - - /// Return the hash of a node at a given address - /// - /// For more information regarding the other parameters, see Tree::get_proof(). - fn get_node_hash( - &self, - address: usize, - log2_size: usize, - ) -> Result<&Hash, Error> { - let address = address >> log2_size; - let bounds = 1 << (self.log2_root_size - log2_size); - snafu::ensure!(address < bounds, SizeOutOfRangeSnafu); - let level = self.get_level(log2_size)?; - if address < level.len() { - Ok(&level[address]) - } else { - self.pristine.get_hash(log2_size) - } - } - - /// Update node hashes when a new set of non-pristine nodes is added to the leaf level - fn bubble_up(&mut self) { - let mut hasher = Hasher::new(); - // Go bottom up, updating hashes - for log2_prev_size in self.log2_leaf_size..self.log2_root_size { - let log2_next_size = log2_prev_size + 1; - // Extract the next level from self to deal with borrow-checker - let mut next = vec![]; - std::mem::swap( - &mut next, - self.get_level_mut(log2_next_size).expect("cannot fail"), - ); - let prev = self.get_level(log2_prev_size).expect("cannot fail"); - // Redo last entry (if any) because it may have been constructed - // from the last non-pristine entry in the previous level paired - // with a pristine entry (i.e., the previous level was odd). 
- let first_entry = if next.is_empty() { 0 } else { next.len() - 1 }; - // Next level needs half as many (rounded up) as previous - next.resize_with((prev.len() + 1) / 2, Default::default); - // Last safe entry has two non-pristine leafs - let last_safe_entry = prev.len() / 2; - // Do all entries for which we have two non-pristine children - for i in first_entry..last_safe_entry { - next[i] = get_concat_hash( - &mut hasher, - &prev[2 * i], - &prev[2 * i + 1], - ); - } - // Maybe do last odd entry - if prev.len() > 2 * last_safe_entry { - let prev_pristine = self - .pristine - .get_hash(log2_prev_size) - .expect("cannot fail"); - next[last_safe_entry] = get_concat_hash( - &mut hasher, - &prev[prev.len() - 1], - prev_pristine, - ); - } - // Put the level back in self - std::mem::swap( - &mut next, - self.get_level_mut(log2_next_size).expect("cannot fail"), - ); - } - } - - /// Return the hashes at the given level - fn get_level(&self, log2_size: usize) -> Result<&Level, Error> { - let index = self.get_level_index(log2_size)?; - Ok(&self.tree[index]) - } - - /// Mutable version of Tree::get_level() - fn get_level_mut(&mut self, log2_size: usize) -> Result<&mut Level, Error> { - let index = self.get_level_index(log2_size)?; - Ok(&mut self.tree[index]) - } - - /// Compute the level index given the sub-tree size - /// - /// - `log2_size`: Log2 of the size in bytes of the subtree. - fn get_level_index(&self, log2_size: usize) -> Result<usize, Error> { - snafu::ensure!( - log2_size >= self.log2_leaf_size - && log2_size <= self.log2_root_size, - SizeOutOfRangeSnafu - ); - Ok(self.log2_root_size - log2_size) - } -} - -type Level = Vec<Hash>; - -#[cfg(test)] -mod tests { - use super::*; - use crate::hash::HASH_SIZE; - - fn compare_to_pristine( - tree: Tree, - log2_root_size: usize, - log2_leaf_size: usize, - log2_word_size: usize, - ) { - let pristine = - pristine::Tree::new(log2_root_size, log2_word_size).unwrap(); - for log2_size in log2_leaf_size..log2_root_size { - let max_address = 1 << (log2_root_size - log2_size); - for address in 0..max_address { - assert_eq!( - tree.get_node_hash(address << log2_size, log2_size) - .unwrap(), - pristine.get_hash(log2_size).unwrap() - ); - } - } - } - - #[test] - fn test_it_fails_to_create_a_tree_with_leaf_size_greater_than_root_size() { - let err = Tree::new(2, 3, 0).unwrap_err(); - assert_eq!(err, Error::LeafSizeGreaterThanRootSize); - } - - #[test] - fn test_it_fails_to_create_a_tree_with_word_size_greater_than_leaf_size() { - let err = Tree::new(2, 1, 2).unwrap_err(); - assert_eq!(err, Error::WordSizeGreaterThanLeafSize); - } - - #[test] - fn test_it_fails_to_create_that_does_not_fit_in_memory() { - let err = Tree::new(65, 1, 0).unwrap_err(); - assert_eq!(err, Error::TreeTooLarge); - } - - #[test] - fn test_it_is_equals_to_pristine_tree_when_empty() { - let tree = Tree::new(8, 3, 0).unwrap(); - compare_to_pristine(tree, 8, 3, 0); - } - - #[test] - fn test_it_fails_to_create_tree_with_too_many_leaves() { - // It should have at most 2 leaves - let leaves = vec![Hash::default(); 3]; - let err = Tree::new_from_leaves(3, 2, 1, leaves).unwrap_err(); - assert_eq!(err, Error::TooManyLeaves); - } - - #[test] - fn test_it_is_equals_to_pristine_tree_when_created_with_no_leaves() { - let leaves = vec![]; - let tree = Tree::new_from_leaves(8, 3, 0, leaves).unwrap(); - compare_to_pristine(tree, 8, 3, 0); - } - - #[test] - fn test_it_works_propertly_when_created_with_all_leaves() { - let leaves = vec![Hash::from([0xFF; HASH_SIZE]); 8]; - let tree = Tree::new_from_leaves(3, 0, 0,
leaves).unwrap(); - assert_eq!(tree.get_level(0).unwrap().len(), 8); - assert_eq!(tree.get_level(1).unwrap().len(), 4); - assert_eq!(tree.get_level(2).unwrap().len(), 2); - assert_eq!(tree.get_level(3).unwrap().len(), 1); - assert_eq!( - tree.get_root_hash(), - &Hash::decode("ec06b3285e5018dbd1981c64dbf6e9cea02fa591f0322b51cfbc31a729500928") - ); - } - - #[test] - fn test_it_works_propertly_when_created_with_odd_number_of_leaves() { - let leaves = vec![Hash::from([0xFF; HASH_SIZE]); 3]; - let tree = Tree::new_from_leaves(3, 0, 0, leaves).unwrap(); - assert_eq!(tree.get_level(0).unwrap().len(), 3); - assert_eq!(tree.get_level(1).unwrap().len(), 2); - assert_eq!(tree.get_level(2).unwrap().len(), 1); - assert_eq!(tree.get_level(3).unwrap().len(), 1); - assert_eq!( - tree.get_root_hash(), - &Hash::decode("4d41dd9b105ebb70fbed098caccf3d56b286c690a96202c1357f951cca4dad5d") - ); - } - - #[test] - fn test_it_works_properly_when_root_size_equals_to_leaf_size() { - let leaves = vec![Hash::from([0xFF; HASH_SIZE])]; - let tree = Tree::new_from_leaves(0, 0, 0, leaves).unwrap(); - assert_eq!(tree.get_root_hash(), &Hash::from([0xFF; HASH_SIZE])); - } - - #[test] - fn test_it_computes_the_level_index_properly() { - let tree = Tree::new(3, 1, 0).unwrap(); - assert_eq!(tree.get_level_index(1).unwrap(), 2); - assert_eq!(tree.get_level_index(2).unwrap(), 1); - assert_eq!(tree.get_level_index(3).unwrap(), 0); - } - - #[test] - fn test_it_fails_to_compute_the_index_when_log2_size_is_out_of_range() { - let tree = Tree::new(3, 2, 1).unwrap(); - assert_eq!(tree.get_level_index(4).unwrap_err(), Error::SizeOutOfRange); - assert_eq!(tree.get_level_index(1).unwrap_err(), Error::SizeOutOfRange); - } - - #[test] - fn test_it_fails_to_get_proof_when_log2_size_is_out_of_range() { - let tree = Tree::new(3, 2, 1).unwrap(); - assert_eq!(tree.get_proof(0, 4).unwrap_err(), Error::SizeOutOfRange); - assert_eq!(tree.get_proof(0, 1).unwrap_err(), Error::SizeOutOfRange); - } - - #[test] - fn test_it_fails_to_get_proof_when_address_is_misalign() { - let tree = Tree::new(3, 2, 1).unwrap(); - assert_eq!( - tree.get_proof((0 << 2) + 1, 2).unwrap_err(), - Error::MisalignedAddress - ); - assert_eq!( - tree.get_proof((1 << 2) + 1, 2).unwrap_err(), - Error::MisalignedAddress - ); - } - - #[test] - fn test_it_fails_to_get_proof_when_address_is_out_of_bound() { - let tree = Tree::new(3, 2, 1).unwrap(); - assert_eq!( - tree.get_proof(2 << 2, 2).unwrap_err(), - Error::SizeOutOfRange - ); - assert_eq!( - tree.get_proof(1 << 3, 2).unwrap_err(), - Error::SizeOutOfRange - ); - } - - #[test] - fn test_it_gets_correct_proof_of_root_node() { - let tree = Tree::new(3, 2, 1).unwrap(); - let proof = tree.get_proof(0, 3).unwrap(); - let root_hash = tree.pristine.get_hash(3).unwrap(); - assert_eq!(proof.target_address, 0); - assert_eq!(proof.log2_target_size, 3); - assert_eq!(&proof.target_hash, root_hash); - assert_eq!(proof.log2_root_size, 3); - assert_eq!(&proof.root_hash, root_hash); - assert_eq!(proof.sibling_hashes, vec![]); - } - - #[test] - fn test_it_gets_correct_proof_of_leaf_node_when_empty() { - let tree = Tree::new(3, 0, 0).unwrap(); - let proof = tree.get_proof(0, 0).unwrap(); - assert_eq!(proof.target_address, 0); - assert_eq!(proof.log2_target_size, 0); - assert_eq!(&proof.target_hash, tree.pristine.get_hash(0).unwrap()); - assert_eq!(proof.log2_root_size, 3); - assert_eq!(&proof.root_hash, tree.pristine.get_hash(3).unwrap()); - assert_eq!( - proof.sibling_hashes, - vec![ - tree.pristine.get_hash(0).unwrap().clone(), - 
diff --git a/offchain/host-runner/src/merkle_tree/mod.rs b/offchain/host-runner/src/merkle_tree/mod.rs
deleted file mode 100644
index 82b205c45..000000000
--- a/offchain/host-runner/src/merkle_tree/mod.rs
+++ /dev/null
@@ -1,39 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-pub mod complete;
-pub mod pristine;
-pub mod proof;
-
-use snafu::Snafu;
-
-use crate::hash::{Digest, Hash, Hasher};
-
-#[derive(Debug, Snafu, PartialEq)]
-pub enum Error {
-    #[snafu(display("log2_target_size is greater than log2_root_size"))]
-    TargetSizeGreaterThanRootSize,
-    #[snafu(display("log2_leaf_size is greater than log2_root_size"))]
-    LeafSizeGreaterThanRootSize,
-    #[snafu(display("log2_word_size is greater than log2_leaf_size"))]
-    WordSizeGreaterThanLeafSize,
-    #[snafu(display("log2_word_size is greater than log2_root_size"))]
-    WordSizeGreaterThanRootSize,
-    #[snafu(display("tree is too large for address type"))]
-    TreeTooLarge,
-    #[snafu(display("tree is full"))]
-    TreeIsFull,
-    #[snafu(display("too many leaves"))]
-    TooManyLeaves,
-    #[snafu(display("log2_size is out of range"))]
-    SizeOutOfRange,
-    #[snafu(display("address is misaligned"))]
-    MisalignedAddress,
-}
-
-fn get_concat_hash(hasher: &mut Hasher, left: &Hash, right: &Hash) -> Hash {
-    hasher.reset();
-    hasher.update(left.data());
-    hasher.update(right.data());
-    hasher.finalize_reset().into()
-}
diff --git a/offchain/host-runner/src/merkle_tree/pristine.rs b/offchain/host-runner/src/merkle_tree/pristine.rs
deleted file mode 100644
index a203702af..000000000
--- a/offchain/host-runner/src/merkle_tree/pristine.rs
+++ /dev/null
@@ -1,127 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-//! Pristine merkle tree based on Cartesi machine-emulator implementation
-
-use super::{
-    get_concat_hash, Error, SizeOutOfRangeSnafu,
-    WordSizeGreaterThanRootSizeSnafu,
-};
-use crate::hash::{Digest, Hash, Hasher};
-
-/// Merkle tree where all leaves are zero
-#[derive(Debug)]
-pub struct Tree {
-    log2_root_size: usize,
-    log2_word_size: usize,
-    hashes: Vec<Hash>,
-}
-
-impl Tree {
-    /// Create a new pristine merkle tree
-    ///
-    /// - `log2_root_size`: Log2 of the size in bytes of the whole merkle tree.
-    /// - `log2_word_size`: Log2 of the size in bytes of a single word.
-    pub fn new(
-        log2_root_size: usize,
-        log2_word_size: usize,
-    ) -> Result<Self, Error> {
-        snafu::ensure!(
-            log2_word_size <= log2_root_size,
-            WordSizeGreaterThanRootSizeSnafu
-        );
-        let num_hashes = log2_root_size - log2_word_size + 1;
-        let mut hashes = vec![];
-        let mut hasher = Hasher::new();
-        let word: Vec<u8> = vec![0; 1 << log2_word_size];
-        hasher.update(&word);
-        hashes.push(hasher.finalize_reset().into());
-        for i in 1..num_hashes {
-            hashes.push(get_concat_hash(
-                &mut hasher,
-                &hashes[i - 1],
-                &hashes[i - 1],
-            ));
-        }
-        Ok(Self {
-            log2_root_size,
-            log2_word_size,
-            hashes,
-        })
-    }
-
-    /// Get a hash for a sub-tree of the given size
-    ///
-    /// - `log2_size`: Log2 of the size in bytes of the subtree.
-    pub fn get_hash(&self, log2_size: usize) -> Result<&Hash, Error> {
-        snafu::ensure!(
-            log2_size >= self.log2_word_size
-                && log2_size <= self.log2_root_size,
-            SizeOutOfRangeSnafu
-        );
-        Ok(&self.hashes[log2_size - self.log2_word_size])
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use super::*;
-
-    #[test]
-    fn test_it_fails_to_create_a_tree_with_word_size_greater_than_root_size() {
-        let err = Tree::new(2, 3).unwrap_err();
-        assert_eq!(err, Error::WordSizeGreaterThanRootSize);
-    }
-
-    #[test]
-    fn test_it_fails_to_get_hash_greater_than_root_size() {
-        let tree = Tree::new(5, 3).unwrap();
-        let err = tree.get_hash(6).unwrap_err();
-        assert_eq!(err, Error::SizeOutOfRange);
-    }
-
-    #[test]
-    fn test_it_fails_to_get_hash_smaller_than_word_size() {
-        let tree = Tree::new(5, 3).unwrap();
-        let err = tree.get_hash(2).unwrap_err();
-        assert_eq!(err, Error::SizeOutOfRange);
-    }
-
-    #[test]
-    fn test_it_creates_a_tree_with_root_size_equals_to_word_size() {
-        let tree = Tree::new(5, 5).unwrap();
-        assert_eq!(
-            tree.get_hash(5).unwrap(),
-            &Hash::decode("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563")
-        );
-    }
-
-    #[test]
-    fn test_it_creates_a_tree_with_correct_hashes() {
-        let tree = Tree::new(8, 3).unwrap();
-        assert_eq!(
-            tree.get_hash(3).unwrap(),
-            &Hash::decode("011b4d03dd8c01f1049143cf9c4c817e4b167f1d1b83e5c6f0f10d89ba1e7bce")
-        );
-        assert_eq!(
-            tree.get_hash(4).unwrap(),
-            &Hash::decode("4d9470a821fbe90117ec357e30bad9305732fb19ddf54a07dd3e29f440619254")
-        );
-        assert_eq!(
-            tree.get_hash(5).unwrap(),
-            &Hash::decode("ae39ce8537aca75e2eff3e38c98011dfe934e700a0967732fc07b430dd656a23")
-        );
-        assert_eq!(
-            tree.get_hash(6).unwrap(),
-            &Hash::decode("3fc9a15f5b4869c872f81087bb6104b7d63e6f9ab47f2c43f3535eae7172aa7f")
-        );
-        assert_eq!(
-            tree.get_hash(7).unwrap(),
-            &Hash::decode("17d2dd614cddaa4d879276b11e0672c9560033d3e8453a1d045339d34ba601b9")
-        );
-        assert_eq!(
-            tree.get_hash(8).unwrap(),
-            &Hash::decode("c37b8b13ca95166fb7af16988a70fcc90f38bf9126fd833da710a47fb37a55e6")
-        );
-    }
-}
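A pristine tree's level-0 hash is the hash of a zeroed word, and each higher level hashes the previous hash concatenated with itself; the 290dec… constant asserted in the test above is the Keccak-256 digest of 32 zero bytes, which pins down the hasher. A self-contained check of both steps, assuming the sha3 and hex crates as stand-ins for the crate's own Hasher:

    use sha3::{Digest, Keccak256};

    fn main() {
        // Level 0: hash of a pristine 2^5-byte word, as in Tree::new(5, 5)
        let mut level: [u8; 32] = Keccak256::digest([0u8; 32]).into();
        assert_eq!(
            hex::encode(level),
            "290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"
        );
        // Each further level hashes the previous hash concatenated with itself
        let mut hasher = Keccak256::new();
        hasher.update(level);
        hasher.update(level);
        level = hasher.finalize().into();
        println!("pristine hash one level up: {}", hex::encode(level));
    }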
diff --git a/offchain/host-runner/src/merkle_tree/proof.rs b/offchain/host-runner/src/merkle_tree/proof.rs
deleted file mode 100644
index d40955dbe..000000000
--- a/offchain/host-runner/src/merkle_tree/proof.rs
+++ /dev/null
@@ -1,69 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-//! Merkle tree proof based on Cartesi machine-emulator implementation
-
-use super::{Error, SizeOutOfRangeSnafu, TargetSizeGreaterThanRootSizeSnafu};
-
-use crate::hash::Hash;
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Proof {
-    pub target_address: usize,
-    pub log2_target_size: usize,
-    pub target_hash: Hash,
-    pub log2_root_size: usize,
-    pub root_hash: Hash,
-    pub sibling_hashes: Vec<Hash>,
-}
-
-/// Merkle tree proof structure
-///
-/// This structure holds a proof that the node spanning a log2_target_size at a given address in
-/// the tree has a certain hash.
-impl Proof {
-    /// Constructs a merkle_tree_proof object and allocates room for the sibling hashes
-    pub fn new(
-        target_address: usize,
-        log2_target_size: usize,
-        target_hash: Hash,
-        log2_root_size: usize,
-        root_hash: Hash,
-    ) -> Result<Self, Error> {
-        snafu::ensure!(
-            log2_target_size <= log2_root_size,
-            TargetSizeGreaterThanRootSizeSnafu
-        );
-        Ok(Self {
-            target_address,
-            log2_target_size,
-            target_hash,
-            log2_root_size,
-            root_hash,
-            sibling_hashes: vec![
-                Hash::default();
-                log2_root_size - log2_target_size
-            ],
-        })
-    }
-
-    /// Modify hash corresponding to log2_size in the list of siblings.
-    pub fn set_sibling_hash(
-        &mut self,
-        hash: Hash,
-        log2_size: usize,
-    ) -> Result<(), Error> {
-        let index = self.log2_size_to_index(log2_size)?;
-        self.sibling_hashes[index] = hash;
-        Ok(())
-    }
-
-    /// Converts log2_size to index into siblings array
-    fn log2_size_to_index(&self, log2_size: usize) -> Result<usize, Error> {
-        snafu::ensure!(log2_size < self.log2_root_size, SizeOutOfRangeSnafu);
-        snafu::ensure!(log2_size >= self.log2_target_size, SizeOutOfRangeSnafu);
-        let index = log2_size - self.log2_target_size;
-        snafu::ensure!(index < self.sibling_hashes.len(), SizeOutOfRangeSnafu);
-        Ok(index)
-    }
-}
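A proof built by Proof::new() above carries log2_root_size − log2_target_size siblings ordered bottom-up, and log2_size_to_index() places the sibling spanning 2^s bytes at index s − log2_target_size; the root level itself never appears as a sibling. A worked instance of that arithmetic (plain Rust; the variable names are illustrative):

    fn main() {
        let (log2_target_size, log2_root_size) = (0usize, 3usize);
        // Matches Proof::new above: one sibling per level below the root
        let num_siblings = log2_root_size - log2_target_size;
        assert_eq!(num_siblings, 3);
        for log2_size in log2_target_size..log2_root_size {
            let index = log2_size - log2_target_size;
            assert!(index < num_siblings); // always within sibling_hashes
        }
    }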
diff --git a/offchain/host-runner/src/model.rs b/offchain/host-runner/src/model.rs
deleted file mode 100644
index 282e4bbb6..000000000
--- a/offchain/host-runner/src/model.rs
+++ /dev/null
@@ -1,209 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use crate::conversions;
-use crate::driver::{compute_notice_hash, compute_voucher_hash};
-use crate::hash::Hash;
-use crate::merkle_tree::proof::Proof;
-use crate::proofs::Proofable;
-
-const ADDRESS_SIZE: usize = 20;
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct AdvanceStateRequest {
-    pub metadata: AdvanceMetadata,
-    pub payload: Vec<u8>,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct AdvanceMetadata {
-    pub msg_sender: [u8; ADDRESS_SIZE],
-    pub epoch_index: u64,
-    pub input_index: u64,
-    pub block_number: u64,
-    pub timestamp: u64,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct AdvanceResult {
-    pub status: CompletionStatus,
-    pub reports: Vec<Report>,
-    pub voucher_hashes_in_epoch: Option<Proof>,
-    pub voucher_root: Option<Hash>,
-    pub notice_hashes_in_epoch: Option<Proof>,
-    pub notice_root: Option<Hash>,
-}
-
-impl AdvanceResult {
-    pub fn accepted(
-        vouchers: Vec<Voucher>,
-        notices: Vec<Notice>,
-        reports: Vec<Report>,
-    ) -> Self {
-        let status = CompletionStatus::Accepted { vouchers, notices };
-        Self::new(status, reports)
-    }
-
-    pub fn rejected(reports: Vec<Report>) -> Self {
-        Self::new(CompletionStatus::Rejected, reports)
-    }
-
-    pub fn exception(exception: RollupException, reports: Vec<Report>) -> Self {
-        let status = CompletionStatus::Exception { exception };
-        Self::new(status, reports)
-    }
-
-    fn new(status: CompletionStatus, reports: Vec<Report>) -> Self {
-        Self {
-            status,
-            reports,
-            voucher_hashes_in_epoch: None,
-            voucher_root: None,
-            notice_hashes_in_epoch: None,
-            notice_root: None,
-        }
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum CompletionStatus {
-    Accepted {
-        vouchers: Vec<Voucher>,
-        notices: Vec<Notice>,
-    },
-    Rejected,
-    Exception {
-        exception: RollupException,
-    },
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct InspectStateRequest {
-    pub payload: Vec<u8>,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct InspectResult {
-    pub status: InspectStatus,
-    pub reports: Vec<Report>,
-}
-
-impl InspectResult {
-    pub fn accepted(reports: Vec<Report>) -> Self {
-        Self {
-            status: InspectStatus::Accepted,
-            reports,
-        }
-    }
-
-    pub fn rejected(reports: Vec<Report>) -> Self {
-        Self {
-            status: InspectStatus::Rejected,
-            reports,
-        }
-    }
-
-    pub fn exception(reports: Vec<Report>, exception: RollupException) -> Self {
-        Self {
-            status: InspectStatus::Exception { exception },
-            reports,
-        }
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum InspectStatus {
-    Accepted,
-    Rejected,
-    Exception { exception: RollupException },
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum FinishStatus {
-    Accept,
-    Reject,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub enum RollupRequest {
-    AdvanceState(AdvanceStateRequest),
-    InspectState(InspectStateRequest),
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Voucher {
-    pub destination: [u8; ADDRESS_SIZE],
-    pub payload: Vec<u8>,
-    pub keccak: Hash,
-    pub keccak_in_voucher_hashes: Option<Proof>,
-}
-
-impl Voucher {
-    pub fn new(destination: [u8; ADDRESS_SIZE], payload: Vec<u8>) -> Self {
-        let keccak = compute_voucher_hash(&destination, &payload);
-        Self {
-            destination,
-            payload,
-            keccak,
-            keccak_in_voucher_hashes: None,
-        }
-    }
-}
-
-impl Proofable for Voucher {
-    fn get_hash(&self) -> &Hash {
-        &self.keccak
-    }
-
-    fn set_proof(&mut self, proof: Proof) {
-        self.keccak_in_voucher_hashes = Some(proof);
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Notice {
-    pub payload: Vec<u8>,
-    pub keccak: Hash,
-    pub keccak_in_notice_hashes: Option<Proof>,
-}
-
-impl Notice {
-    pub fn new(payload: Vec<u8>) -> Self {
-        let keccak = compute_notice_hash(&payload);
-        Self {
-            payload,
-            keccak,
-            keccak_in_notice_hashes: None,
-        }
-    }
-}
-
-impl Proofable for Notice {
-    fn get_hash(&self) -> &Hash {
-        &self.keccak
-    }
-
-    fn set_proof(&mut self, proof: Proof) {
-        self.keccak_in_notice_hashes = Some(proof);
-    }
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct Report {
-    pub payload: Vec<u8>,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq)]
-pub struct RollupException {
-    pub payload: Vec<u8>,
-}
-
-impl std::fmt::Display for RollupException {
-    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
-        write!(
-            f,
-            "rollup exception ({})",
-            conversions::encode_ethereum_binary(&self.payload)
-        )
-    }
-}
diff --git a/offchain/host-runner/src/proofs.rs b/offchain/host-runner/src/proofs.rs
deleted file mode 100644
index a3bb830ea..000000000
--- a/offchain/host-runner/src/proofs.rs
+++ /dev/null
@@ -1,47 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use crate::hash::{Digest, Hash, Hasher};
-use crate::merkle_tree::{self, complete::Tree, proof::Proof};
-
-const LOG2_ROOT_SIZE: usize = 16 + LOG2_HASH_SIZE;
-const LOG2_WORD_SIZE: usize = 3;
-const LOG2_HASH_SIZE: usize = 5;
-const WORD_SIZE: usize = 1 << LOG2_WORD_SIZE;
-const WORDS_PER_HASH: usize = 1 << (LOG2_HASH_SIZE - LOG2_WORD_SIZE);
-
-/// Trait to be implemented by vouchers and notices
-pub trait Proofable {
-    fn get_hash(&self) -> &Hash;
-    fn set_proof(&mut self, proof: Proof);
-}
-
-/// Update the merkle proofs of every proofable in the array and return the merkle-tree's root hash
-pub fn compute_proofs(
-    proofables: &mut [impl Proofable],
-) -> Result<Hash, merkle_tree::Error> {
-    let mut hasher = Hasher::new();
-    let mut leaves: Vec<Hash> = vec![];
-    for proofable in proofables.iter() {
-        let hash = proofable.get_hash();
-        for word in 0..WORDS_PER_HASH {
-            let start = word * WORD_SIZE;
-            let end = start + WORD_SIZE;
-            hasher.update(&hash.data()[start..end]);
-            let word_hash = hasher.finalize_reset().into();
-            leaves.push(word_hash);
-        }
-    }
-    let tree = Tree::new_from_leaves(
-        LOG2_ROOT_SIZE,
-        LOG2_WORD_SIZE,
-        LOG2_WORD_SIZE,
-        leaves,
-    )?;
-    for (i, proofable) in proofables.iter_mut().enumerate() {
-        let proof =
-            tree.get_proof(i * (1 << LOG2_HASH_SIZE), LOG2_HASH_SIZE)?;
-        proofable.set_proof(proof);
-    }
-    Ok(tree.get_root_hash().clone())
-}
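With LOG2_HASH_SIZE = 5 and LOG2_WORD_SIZE = 3, WORDS_PER_HASH is 4, so compute_proofs() above does not insert the 32-byte output hashes as leaves directly: it splits each one into four 8-byte words and hashes each word into its own leaf. A sketch of that fan-out, again assuming Keccak-256 via the sha3 crate in place of the crate's Hasher; leaves_for_output_hash is an illustrative name:

    use sha3::{Digest, Keccak256};

    /// Derive the four tree leaves that stand in for one 32-byte output hash
    fn leaves_for_output_hash(hash: &[u8; 32]) -> Vec<[u8; 32]> {
        hash.chunks(8) // WORD_SIZE = 1 << LOG2_WORD_SIZE
            .map(|word| Keccak256::digest(word).into())
            .collect()
    }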
diff --git a/offchain/host-runner/tests/common/config.rs b/offchain/host-runner/tests/common/config.rs
deleted file mode 100644
index 8f9eaae4c..000000000
--- a/offchain/host-runner/tests/common/config.rs
+++ /dev/null
@@ -1,28 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-pub const GRPC_SERVER_MANAGER_PORT: u16 = 50001;
-pub const HTTP_INSPECT_PORT: u16 = 50002;
-pub const HTTP_ROLLUP_SERVER_PORT: u16 = 50004;
-pub const FINISH_TIMEOUT: u64 = 100;
-
-pub fn get_grpc_server_manager_address() -> String {
-    format!("http://127.0.0.1:{}", GRPC_SERVER_MANAGER_PORT)
-}
-
-pub fn get_http_inspect_address() -> String {
-    format!("http://127.0.0.1:{}", HTTP_INSPECT_PORT)
-}
-
-pub fn get_http_rollup_server_address() -> String {
-    format!("http://127.0.0.1:{}", HTTP_ROLLUP_SERVER_PORT)
-}
-
-pub fn get_host_runner_path() -> String {
-    std::env::var("CARTESI_HOST_RUNNER_PATH")
-        .unwrap_or(String::from("../target/debug/cartesi-rollups-host-runner"))
-}
-
-pub fn get_test_verbose() -> bool {
-    std::env::var("CARTESI_TEST_VERBOSE").is_ok()
-}
diff --git a/offchain/host-runner/tests/common/grpc_client.rs b/offchain/host-runner/tests/common/grpc_client.rs
deleted file mode 100644
index 52a665a5f..000000000
--- a/offchain/host-runner/tests/common/grpc_client.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-pub use grpc_interfaces::cartesi_machine::*;
-pub use grpc_interfaces::cartesi_server_manager::*;
-
-use super::config;
-
-pub type ServerManagerClient =
-    server_manager_client::ServerManagerClient<tonic::transport::Channel>;
-
-pub async fn connect() -> ServerManagerClient {
-    ServerManagerClient::connect(config::get_grpc_server_manager_address())
-        .await
-        .expect("failed to connect to grpc server")
-}
-
-pub fn create_timestamp() -> u64 {
-    std::time::SystemTime::now()
-        .duration_since(std::time::UNIX_EPOCH)
-        .unwrap()
-        .as_secs()
-}
-
-pub fn create_start_session_request(session_id: &str) -> StartSessionRequest {
-    StartSessionRequest {
-        session_id: session_id.into(),
-        machine_directory: "".into(),
-        active_epoch_index: 0,
-        processed_input_count: 0,
-        server_cycles: None,
-        server_deadline: None,
-        runtime: None,
-    }
-}
-
-pub fn create_advance_state_request(
-    session_id: &str,
-    epoch_index: u64,
-    input_index: u64,
-) -> AdvanceStateRequest {
-    AdvanceStateRequest {
-        session_id: session_id.into(),
-        active_epoch_index: epoch_index,
-        current_input_index: input_index,
-        input_metadata: Some(InputMetadata {
-            msg_sender: Some(Address {
-                data: super::create_address(),
-            }),
-            block_number: 0,
-            timestamp: create_timestamp(),
-            epoch_index: 0, //this field is deprecated and should always be 0
-            input_index,
-        }),
-        input_payload: super::create_payload(),
-    }
-}
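The ServerManagerClient alias above follows tonic's generated-client pattern, where the client type is generic over its transport; tonic::transport::Channel is the standard parameter and is an editorial assumption here, as is the generated server_manager_client module layout. A spelled-out sketch of the same pattern; connect() accepting a String is confirmed by the call site above:

    use tonic::transport::Channel;

    // Hypothetical restatement of the alias for a generated tonic client
    type Client = server_manager_client::ServerManagerClient<Channel>;

    async fn connect_to(address: String) -> Result<Client, tonic::transport::Error> {
        server_manager_client::ServerManagerClient::connect(address).await
    }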
diff --git a/offchain/host-runner/tests/common/http_client.rs b/offchain/host-runner/tests/common/http_client.rs
deleted file mode 100644
index f3f287d13..000000000
--- a/offchain/host-runner/tests/common/http_client.rs
+++ /dev/null
@@ -1,118 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-pub use rollups_http_client::rollup::*;
-
-use reqwest::Response;
-use serde::{de::DeserializeOwned, Deserialize};
-use std::collections::HashMap;
-
-use super::config;
-
-#[derive(Debug, PartialEq)]
-pub struct HttpError {
-    pub status: u16,
-    pub message: String,
-}
-
-#[derive(Debug, Deserialize, PartialEq)]
-#[serde(tag = "request_type")]
-pub enum RollupHttpRequest {
-    #[serde(rename = "advance_state")]
-    Advance { data: AdvanceRequest },
-    #[serde(rename = "inspect_state")]
-    Inspect { data: InspectRequest },
-}
-
-#[derive(Debug, Deserialize, PartialEq)]
-pub struct InspectStateResponse {
-    pub reports: Vec<Report>,
-}
-
-#[derive(Debug, Deserialize, PartialEq)]
-pub struct IndexResponse {
-    pub index: usize,
-}
-
-pub fn convert_binary_to_hex(payload: &Vec<u8>) -> String {
-    String::from("0x") + &hex::encode(payload)
-}
-
-pub fn create_address() -> String {
-    convert_binary_to_hex(&super::create_address())
-}
-
-pub fn create_payload() -> String {
-    convert_binary_to_hex(&super::create_payload())
-}
-
-pub async fn finish(status: String) -> Result<RollupHttpRequest, HttpError> {
-    let url = format!("{}/finish", config::get_http_rollup_server_address());
-    let mut request = HashMap::new();
-    request.insert("status", status);
-    let client = reqwest::Client::new();
-    let response = client.post(url).json(&request).send().await.unwrap();
-    handle_json_response(response).await
-}
-
-pub async fn insert_voucher(
-    destination: String,
-    payload: String,
-) -> Result<IndexResponse, HttpError> {
-    let url = format!("{}/voucher", config::get_http_rollup_server_address());
-    let mut request = HashMap::new();
-    request.insert("destination", destination);
-    request.insert("payload", payload);
-    let client = reqwest::Client::new();
-    let response = client.post(url).json(&request).send().await.unwrap();
-    handle_json_response(response).await
-}
-
-pub async fn insert_notice(
-    payload: String,
-) -> Result<IndexResponse, HttpError> {
-    let url = format!("{}/notice", config::get_http_rollup_server_address());
-    let mut request = HashMap::new();
-    request.insert("payload", payload);
-    let client = reqwest::Client::new();
-    let response = client.post(url).json(&request).send().await.unwrap();
-    handle_json_response(response).await
-}
-
-pub async fn insert_report(payload: String) -> Result<(), HttpError> {
-    let url = format!("{}/report", config::get_http_rollup_server_address());
-    let mut request = HashMap::new();
-    request.insert("payload", payload);
-    let client = reqwest::Client::new();
-    let response = client.post(url).json(&request).send().await.unwrap();
-    handle_response(response).await.map(|_| ())
-}
-
-pub async fn notify_exception(payload: String) -> Result<(), HttpError> {
-    let url = format!("{}/exception", config::get_http_rollup_server_address());
-    let mut request = HashMap::new();
-    request.insert("payload", payload);
-    let client = reqwest::Client::new();
-    let response = client.post(url).json(&request).send().await.unwrap();
-    handle_response(response).await.map(|_| ())
-}
-
-async fn handle_response(response: Response) -> Result<Response, HttpError> {
-    if response.status() == reqwest::StatusCode::OK {
-        Ok(response)
-    } else {
-        Err(HttpError {
-            status: response.status().as_u16(),
-            message: response.text().await.unwrap(),
-        })
-    }
-}
-
-async fn handle_json_response<T: DeserializeOwned>(
-    response: Response,
-) -> Result<T, HttpError> {
-    match handle_response(response).await {
-        Ok(response) => Ok(response.json::<T>().await.unwrap()),
-        Err(e) => Err(e),
-    }
-}
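handle_json_response() above deserializes any DeserializeOwned type out of a successful response, which is how finish() and the insert helpers share one code path while returning different response types. The same pattern in a standalone form (a sketch assuming reqwest with its json feature and serde; json_or_error and its String error type are illustrative, not part of the deleted code):

    use reqwest::Response;
    use serde::de::DeserializeOwned;

    // Generic JSON-or-error handler: success bodies deserialize into T,
    // anything else surfaces the raw body text as the error
    async fn json_or_error<T: DeserializeOwned>(response: Response) -> Result<T, String> {
        if response.status().is_success() {
            response.json::<T>().await.map_err(|e| e.to_string())
        } else {
            Err(response.text().await.unwrap_or_default())
        }
    }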
diff --git a/offchain/host-runner/tests/common/manager.rs b/offchain/host-runner/tests/common/manager.rs
deleted file mode 100644
index e4b701b99..000000000
--- a/offchain/host-runner/tests/common/manager.rs
+++ /dev/null
@@ -1,57 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use std::process::{Child, Command, Stdio};
-use std::time::Duration;
-
-use super::config;
-use super::grpc_client::{ServerManagerClient, Void};
-
-pub struct Wrapper {
-    child: Child,
-}
-
-impl Wrapper {
-    /// Start the manager and waits until it is ready to answer
-    pub async fn new() -> Self {
-        let mut command = Command::new(config::get_host_runner_path());
-        command
-            .env("RUST_LOG", "host_runner=debug,info")
-            .arg("--grpc-server-manager-port")
-            .arg(config::GRPC_SERVER_MANAGER_PORT.to_string())
-            .arg("--http-inspect-port")
-            .arg(config::HTTP_INSPECT_PORT.to_string())
-            .arg("--http-rollup-server-port")
-            .arg(config::HTTP_ROLLUP_SERVER_PORT.to_string())
-            .arg("--finish-timeout")
-            .arg(config::FINISH_TIMEOUT.to_string());
-        if !config::get_test_verbose() {
-            command.stdout(Stdio::null()).stderr(Stdio::null());
-        }
-        // Wait for a bit to clean up the port from previous test
-        tokio::time::sleep(Duration::from_millis(10)).await;
-        let child = command.spawn().expect("failed to start manager process");
-        wait_for_manager().await;
-        Self { child }
-    }
-}
-
-impl Drop for Wrapper {
-    fn drop(&mut self) {
-        self.child.kill().expect("failed to kill manager process");
-    }
-}
-
-async fn wait_for_manager() {
-    const RETRIES: u64 = 100;
-    for _ in 0..RETRIES {
-        let address = config::get_grpc_server_manager_address();
-        if let Ok(mut client) = ServerManagerClient::connect(address).await {
-            if let Ok(_) = client.get_version(Void {}).await {
-                return;
-            }
-        }
-        tokio::time::sleep(Duration::from_millis(10)).await;
-    }
-    panic!("manager timed out");
-}
diff --git a/offchain/host-runner/tests/common/mod.rs b/offchain/host-runner/tests/common/mod.rs
deleted file mode 100644
index 599ecae71..000000000
--- a/offchain/host-runner/tests/common/mod.rs
+++ /dev/null
@@ -1,71 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-#![allow(dead_code)]
-
-pub mod config;
-pub mod grpc_client;
-pub mod http_client;
-pub mod manager;
-
-pub fn create_address() -> Vec<u8> {
-    rand::random::<[u8; 20]>().into()
-}
-
-pub fn create_payload() -> Vec<u8> {
-    rand::random::<[u8; 16]>().into()
-}
-
-pub async fn setup_advance_state(
-    grpc_client: &mut grpc_client::ServerManagerClient,
-    session_id: &str,
-) {
-    grpc_client
-        .start_session(grpc_client::create_start_session_request(session_id))
-        .await
-        .unwrap();
-    grpc_client
-        .advance_state(grpc_client::create_advance_state_request(
-            session_id, 0, 0,
-        ))
-        .await
-        .unwrap();
-    http_client::finish("accept".into()).await.unwrap();
-}
-
-pub async fn finish_advance_state(
-    grpc_client: &mut grpc_client::ServerManagerClient,
-    session_id: &str,
-) -> Option<grpc_client::ProcessedInput> {
-    // Send a finish request in a separate thread.
-    let handle = tokio::spawn(http_client::finish("accept".into()));
-
-    // Wait for the input to be processed.
-    const RETRIES: i32 = 10;
-    let mut processed = None;
-    for _ in 0..RETRIES {
-        processed = grpc_client
-            .get_epoch_status(grpc_client::GetEpochStatusRequest {
-                session_id: session_id.into(),
-                epoch_index: 0,
-            })
-            .await
-            .unwrap()
-            .into_inner()
-            .processed_inputs
-            .pop();
-        if processed.is_some() {
-            break;
-        }
-        tokio::time::sleep(std::time::Duration::from_millis(10)).await;
-    }
-
-    // Wait for the finish to return.
-    // It should return error because there isn't a next request to be processed.
- handle - .await - .expect("tokio spawn failed") - .expect_err("finish should return error"); - - processed -} diff --git a/offchain/host-runner/tests/grpc.rs b/offchain/host-runner/tests/grpc.rs deleted file mode 100644 index 7df179239..000000000 --- a/offchain/host-runner/tests/grpc.rs +++ /dev/null @@ -1,17 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod common; - -mod grpc_tests { - mod advance_state; - mod delete_epoch; - mod end_session; - mod finish_epoch; - mod get_epoch_status; - mod get_session_status; - mod get_status; - mod get_version; - mod inspect_state; - mod start_session; -} diff --git a/offchain/host-runner/tests/grpc_tests/advance_state.rs b/offchain/host-runner/tests/grpc_tests/advance_state.rs deleted file mode 100644 index 3cd352993..000000000 --- a/offchain/host-runner/tests/grpc_tests/advance_state.rs +++ /dev/null @@ -1,58 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_advances_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let response = grpc_client - .advance_state(grpc_client::create_advance_state_request( - "rollup session", - 0, - 0, - )) - .await - .unwrap() - .into_inner(); - assert_eq!(response, grpc_client::Void {}); - // Check if state changed to advance with HTTP finish - let response = http_client::finish("accept".into()).await.unwrap(); - assert!(matches!( - response, - http_client::RollupHttpRequest::Advance { .. 
} - )); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_advance_request_with_wrong_parameters() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let invalid_requests = vec![ - // Wrong session id - grpc_client::create_advance_state_request("rollup session 1", 0, 0), - // Wrong epoch number - grpc_client::create_advance_state_request("rollup session", 123, 0), - // Wrong input index - grpc_client::create_advance_state_request("rollup session", 0, 123), - ]; - for request in invalid_requests { - let err = grpc_client.advance_state(request).await.unwrap_err(); - assert_eq!(err.code(), tonic::Code::InvalidArgument); - } -} diff --git a/offchain/host-runner/tests/grpc_tests/delete_epoch.rs b/offchain/host-runner/tests/grpc_tests/delete_epoch.rs deleted file mode 100644 index e64e9fbaf..000000000 --- a/offchain/host-runner/tests/grpc_tests/delete_epoch.rs +++ /dev/null @@ -1,116 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use serial_test::serial; -use tonic::Code; - -use crate::common::{ - grpc_client::{self, DeleteEpochRequest, FinishEpochRequest}, - manager, -}; - -const SESSION_ID: &str = "rollup session"; - -#[tokio::test] -#[serial] -async fn test_it_fails_to_delete_active_epoch() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - - let err = grpc_client - .delete_epoch(DeleteEpochRequest { - epoch_index: 0, - session_id: SESSION_ID.into(), - }) - .await - .expect_err("should fail to delete epoch"); - - assert_eq!(err.code(), Code::InvalidArgument); -} - -#[tokio::test] -#[serial] -async fn test_it_fails_to_delete_unexisting_epoch() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .expect("should start session"); - - let err = grpc_client - .delete_epoch(DeleteEpochRequest { - epoch_index: 1, - session_id: SESSION_ID.into(), - }) - .await - .expect_err("should fail to delete epoch"); - - assert_eq!(err.code(), Code::InvalidArgument); -} - -#[tokio::test] -#[serial] -async fn test_it_fails_to_delete_when_there_is_no_session() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - - let err = grpc_client - .delete_epoch(DeleteEpochRequest { - epoch_index: 1, - session_id: SESSION_ID.into(), - }) - .await - .expect_err("should fail to delete epoch"); - - assert_eq!(err.code(), Code::InvalidArgument); -} - -#[tokio::test] -#[serial] -async fn test_it_deletes_epoch() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .expect("should start session"); - - grpc_client - .finish_epoch(FinishEpochRequest { - session_id: SESSION_ID.into(), - active_epoch_index: 0, - processed_input_count_within_epoch: 0, - storage_directory: "".into(), - }) - .await - .expect("should finish epoch"); - - let response = grpc_client - .delete_epoch(DeleteEpochRequest { - epoch_index: 0, - session_id: 
SESSION_ID.into(), - }) - .await; - assert!(response.is_ok()); - - let err = grpc_client - .get_epoch_status(grpc_client::GetEpochStatusRequest { - epoch_index: 0, - session_id: SESSION_ID.into(), - }) - .await - .expect_err("epoch should have been deleted"); - - assert_eq!(err.code(), Code::InvalidArgument); -} diff --git a/offchain/host-runner/tests/grpc_tests/end_session.rs b/offchain/host-runner/tests/grpc_tests/end_session.rs deleted file mode 100644 index 8a8ba0065..000000000 --- a/offchain/host-runner/tests/grpc_tests/end_session.rs +++ /dev/null @@ -1,50 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_ends_existing_session() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - grpc_client - .end_session(grpc_client::EndSessionRequest { - session_id: "rollup session".into(), - }) - .await - .unwrap(); - let err = grpc_client - .end_session(grpc_client::EndSessionRequest { - session_id: "rollup session".into(), - }) - .await - .unwrap_err(); - assert_eq!(err.code(), tonic::Code::InvalidArgument); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_ends_non_existing_session() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session 1", - )) - .await - .unwrap(); - let err = grpc_client - .end_session(grpc_client::EndSessionRequest { - session_id: "rollup session 2".into(), - }) - .await - .unwrap_err(); - assert_eq!(err.code(), tonic::Code::InvalidArgument); -} diff --git a/offchain/host-runner/tests/grpc_tests/finish_epoch.rs b/offchain/host-runner/tests/grpc_tests/finish_epoch.rs deleted file mode 100644 index 400c0c0df..000000000 --- a/offchain/host-runner/tests/grpc_tests/finish_epoch.rs +++ /dev/null @@ -1,360 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::{grpc_client::FinishEpochResponse, *}; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_finishes_existing_epoch() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let result = grpc_client - .finish_epoch(grpc_client::FinishEpochRequest { - session_id: "rollup session".into(), - active_epoch_index: 0, - processed_input_count_within_epoch: 0, - storage_directory: "".into(), - }) - .await - .unwrap() - .into_inner(); - let expected_result = FinishEpochResponse { - machine_hash: Some(grpc_client::Hash { - data: vec![0 as u8; 32], - }), - vouchers_epoch_root_hash: Some(grpc_client::Hash { - data: vec![ - 207, 39, 127, 184, 10, 130, 71, 132, 96, 232, 152, 133, 112, - 183, 24, 241, 224, 131, 206, 183, 111, 126, 39, 26, 26, 20, - 151, 229, 151, 95, 83, 174, - ], - }), - notices_epoch_root_hash: Some(grpc_client::Hash { - data: vec![ - 207, 39, 127, 184, 10, 130, 71, 132, 96, 232, 152, 133, 112, - 183, 24, 241, 224, 131, 206, 183, 111, 126, 39, 26, 26, 20, - 151, 229, 151, 95, 83, 174, - ], - }), - proofs: vec![], - }; - assert_eq_finish_epoch_response(result, expected_result); - - let response = 
grpc_client - .get_epoch_status(grpc_client::GetEpochStatusRequest { - session_id: "rollup session".into(), - epoch_index: 0, - }) - .await - .unwrap() - .into_inner(); - assert_eq!(response.state, grpc_client::EpochState::Finished as i32); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_finishes_existing_epoch_with_outputs() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - let destination = String::from("0x") + &"fa".repeat(20); - http_client::insert_voucher(destination, "0xdeadbeef".into()) - .await - .unwrap(); - http_client::insert_notice("0xdeadbeef".into()) - .await - .unwrap(); - finish_advance_state(&mut grpc_client, "rollup session").await; - - let result = grpc_client - .finish_epoch(grpc_client::FinishEpochRequest { - session_id: "rollup session".into(), - active_epoch_index: 0, - processed_input_count_within_epoch: 1, - storage_directory: "".into(), - }) - .await - .unwrap() - .into_inner(); - - let machine_hash = grpc_client::Hash { - data: vec![0 as u8; 32], - }; - let vouchers_epoch_root_hash = decode_hash( - "29676ea41aaf54b4d66d45bc60b9c8f71b5f9166035d375626746e7396baa7a1", - ); - let notices_epoch_root_hash = decode_hash( - "63a367741b1feb9c2dc64bda8ac4a083ebbe5fd1f7bb4746e94597c988f30197", - ); - let context = vec![0 as u8; 32]; - let expected_result = FinishEpochResponse { - machine_hash: Some(machine_hash.clone()), - vouchers_epoch_root_hash: Some(vouchers_epoch_root_hash.clone()), - notices_epoch_root_hash: Some(notices_epoch_root_hash.clone()), - proofs: vec![ - grpc_client::Proof { - input_index: 0, - output_index: 0, - output_enum: grpc_client::OutputEnum::Voucher.into(), - validity: Some(grpc_client::OutputValidityProof { - input_index_within_epoch: 0, - output_index_within_input: 0, - output_hashes_root_hash: Some(decode_hash( - "bf21d3dd50b9c5e542ea86c0f555b1bde6373829b59f51afd4a95eef24f05245", - )), - vouchers_epoch_root_hash: Some(vouchers_epoch_root_hash.clone()), - notices_epoch_root_hash: Some(notices_epoch_root_hash.clone()), - machine_state_hash: Some(machine_hash.clone()), - output_hash_in_output_hashes_siblings: vec![ - decode_hash("ae39ce8537aca75e2eff3e38c98011dfe934e700a0967732fc07b430dd656a23"), - decode_hash("3fc9a15f5b4869c872f81087bb6104b7d63e6f9ab47f2c43f3535eae7172aa7f"), - decode_hash("17d2dd614cddaa4d879276b11e0672c9560033d3e8453a1d045339d34ba601b9"), - decode_hash("c37b8b13ca95166fb7af16988a70fcc90f38bf9126fd833da710a47fb37a55e6"), - decode_hash("8e7a427fa943d9966b389f4f257173676090c6e95f43e2cb6d65f8758111e309"), - decode_hash("30b0b9deb73e155c59740bacf14a6ff04b64bb8e201a506409c3fe381ca4ea90"), - decode_hash("cd5deac729d0fdaccc441d09d7325f41586ba13c801b7eccae0f95d8f3933efe"), - decode_hash("d8b96e5b7f6f459e9cb6a2f41bf276c7b85c10cd4662c04cbbb365434726c0a0"), - decode_hash("c9695393027fb106a8153109ac516288a88b28a93817899460d6310b71cf1e61"), - decode_hash("63e8806fa0d4b197a259e8c3ac28864268159d0ac85f8581ca28fa7d2c0c03eb"), - decode_hash("91e3eee5ca7a3da2b3053c9770db73599fb149f620e3facef95e947c0ee860b7"), - decode_hash("2122e31e4bbd2b7c783d79cc30f60c6238651da7f0726f767d22747264fdb046"), - decode_hash("f7549f26cc70ed5e18baeb6c81bb0625cb95bb4019aeecd40774ee87ae29ec51"), - decode_hash("7a71f6ee264c5d761379b3d7d617ca83677374b49d10aec50505ac087408ca89"), - decode_hash("2b573c267a712a52e1d06421fe276a03efb1889f337201110fdc32a81f8e1524"), - 
decode_hash("99af665835aabfdc6740c7e2c3791a31c3cdc9f5ab962f681b12fc092816a62f"), - ], - output_hashes_in_epoch_siblings: vec![ - decode_hash("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"), - decode_hash("633dc4d7da7256660a892f8f1604a44b5432649cc8ec5cb3ced4c4e6ac94dd1d"), - decode_hash("890740a8eb06ce9be422cb8da5cdafc2b58c0a5e24036c578de2a433c828ff7d"), - decode_hash("3b8ec09e026fdc305365dfc94e189a81b38c7597b3d941c279f042e8206e0bd8"), - decode_hash("ecd50eee38e386bd62be9bedb990706951b65fe053bd9d8a521af753d139e2da"), - decode_hash("defff6d330bb5403f63b14f33b578274160de3a50df4efecf0e0db73bcdd3da5"), - decode_hash("617bdd11f7c0a11f49db22f629387a12da7596f9d1704d7465177c63d88ec7d7"), - decode_hash("292c23a9aa1d8bea7e2435e555a4a60e379a5a35f3f452bae60121073fb6eead"), - decode_hash("e1cea92ed99acdcb045a6726b2f87107e8a61620a232cf4d7d5b5766b3952e10"), - decode_hash("7ad66c0a68c72cb89e4fb4303841966e4062a76ab97451e3b9fb526a5ceb7f82"), - decode_hash("e026cc5a4aed3c22a58cbd3d2ac754c9352c5436f638042dca99034e83636516"), - decode_hash("3d04cffd8b46a874edf5cfae63077de85f849a660426697b06a829c70dd1409c"), - decode_hash("ad676aa337a485e4728a0b240d92b3ef7b3c372d06d189322bfd5f61f1e7203e"), - decode_hash("a2fca4a49658f9fab7aa63289c91b7c7b6c832a6d0e69334ff5b0a3483d09dab"), - decode_hash("4ebfd9cd7bca2505f7bef59cc1c12ecc708fff26ae4af19abe852afe9e20c862"), - decode_hash("2def10d13dd169f550f578bda343d9717a138562e0093b380a1120789d53cf10"), - decode_hash("776a31db34a1a0a7caaf862cffdfff1789297ffadc380bd3d39281d340abd3ad"), - decode_hash("e2e7610b87a5fdf3a72ebe271287d923ab990eefac64b6e59d79f8b7e08c46e3"), - decode_hash("504364a5c6858bf98fff714ab5be9de19ed31a976860efbd0e772a2efe23e2e0"), - decode_hash("4f05f4acb83f5b65168d9fef89d56d4d77b8944015e6b1eed81b0238e2d0dba3"), - decode_hash("44a6d974c75b07423e1d6d33f481916fdd45830aea11b6347e700cd8b9f0767c"), - decode_hash("edf260291f734ddac396a956127dde4c34c0cfb8d8052f88ac139658ccf2d507"), - decode_hash("6075c657a105351e7f0fce53bc320113324a522e8fd52dc878c762551e01a46e"), - decode_hash("6ca6a3f763a9395f7da16014725ca7ee17e4815c0ff8119bf33f273dee11833b"), - decode_hash("1c25ef10ffeb3c7d08aa707d17286e0b0d3cbcb50f1bd3b6523b63ba3b52dd0f"), - decode_hash("fffc43bd08273ccf135fd3cacbeef055418e09eb728d727c4d5d5c556cdea7e3"), - decode_hash("c5ab8111456b1f28f3c7a0a604b4553ce905cb019c463ee159137af83c350b22"), - decode_hash("0ff273fcbf4ae0f2bd88d6cf319ff4004f8d7dca70d4ced4e74d2c74139739e6"), - decode_hash("7fa06ba11241ddd5efdc65d4e39c9f6991b74fd4b81b62230808216c876f827c"), - decode_hash("7e275adf313a996c7e2950cac67caba02a5ff925ebf9906b58949f3e77aec5b9"), - decode_hash("8f6162fa308d2b3a15dc33cffac85f13ab349173121645aedf00f471663108be"), - decode_hash("78ccaaab73373552f207a63599de54d7d8d0c1805f86ce7da15818d09f4cff62"), - ], - }), - context: context.clone(), - }, - grpc_client::Proof { - input_index: 0, - output_index: 0, - output_enum: grpc_client::OutputEnum::Notice.into(), - validity: Some(grpc_client::OutputValidityProof { - input_index_within_epoch: 0, - output_index_within_input: 0, - output_hashes_root_hash: Some(decode_hash( - "660c2d35b0a43d8179792345211d0eab28d88f47fafadd8334b80196cad41ded", - )), - vouchers_epoch_root_hash: Some(vouchers_epoch_root_hash.clone()), - notices_epoch_root_hash: Some(notices_epoch_root_hash.clone()), - machine_state_hash: Some(machine_hash.clone()), - output_hash_in_output_hashes_siblings: vec![ - decode_hash("ae39ce8537aca75e2eff3e38c98011dfe934e700a0967732fc07b430dd656a23"), - 
decode_hash("3fc9a15f5b4869c872f81087bb6104b7d63e6f9ab47f2c43f3535eae7172aa7f"), - decode_hash("17d2dd614cddaa4d879276b11e0672c9560033d3e8453a1d045339d34ba601b9"), - decode_hash("c37b8b13ca95166fb7af16988a70fcc90f38bf9126fd833da710a47fb37a55e6"), - decode_hash("8e7a427fa943d9966b389f4f257173676090c6e95f43e2cb6d65f8758111e309"), - decode_hash("30b0b9deb73e155c59740bacf14a6ff04b64bb8e201a506409c3fe381ca4ea90"), - decode_hash("cd5deac729d0fdaccc441d09d7325f41586ba13c801b7eccae0f95d8f3933efe"), - decode_hash("d8b96e5b7f6f459e9cb6a2f41bf276c7b85c10cd4662c04cbbb365434726c0a0"), - decode_hash("c9695393027fb106a8153109ac516288a88b28a93817899460d6310b71cf1e61"), - decode_hash("63e8806fa0d4b197a259e8c3ac28864268159d0ac85f8581ca28fa7d2c0c03eb"), - decode_hash("91e3eee5ca7a3da2b3053c9770db73599fb149f620e3facef95e947c0ee860b7"), - decode_hash("2122e31e4bbd2b7c783d79cc30f60c6238651da7f0726f767d22747264fdb046"), - decode_hash("f7549f26cc70ed5e18baeb6c81bb0625cb95bb4019aeecd40774ee87ae29ec51"), - decode_hash("7a71f6ee264c5d761379b3d7d617ca83677374b49d10aec50505ac087408ca89"), - decode_hash("2b573c267a712a52e1d06421fe276a03efb1889f337201110fdc32a81f8e1524"), - decode_hash("99af665835aabfdc6740c7e2c3791a31c3cdc9f5ab962f681b12fc092816a62f"), - ], - output_hashes_in_epoch_siblings: vec![ - decode_hash("290decd9548b62a8d60345a988386fc84ba6bc95484008f6362f93160ef3e563"), - decode_hash("633dc4d7da7256660a892f8f1604a44b5432649cc8ec5cb3ced4c4e6ac94dd1d"), - decode_hash("890740a8eb06ce9be422cb8da5cdafc2b58c0a5e24036c578de2a433c828ff7d"), - decode_hash("3b8ec09e026fdc305365dfc94e189a81b38c7597b3d941c279f042e8206e0bd8"), - decode_hash("ecd50eee38e386bd62be9bedb990706951b65fe053bd9d8a521af753d139e2da"), - decode_hash("defff6d330bb5403f63b14f33b578274160de3a50df4efecf0e0db73bcdd3da5"), - decode_hash("617bdd11f7c0a11f49db22f629387a12da7596f9d1704d7465177c63d88ec7d7"), - decode_hash("292c23a9aa1d8bea7e2435e555a4a60e379a5a35f3f452bae60121073fb6eead"), - decode_hash("e1cea92ed99acdcb045a6726b2f87107e8a61620a232cf4d7d5b5766b3952e10"), - decode_hash("7ad66c0a68c72cb89e4fb4303841966e4062a76ab97451e3b9fb526a5ceb7f82"), - decode_hash("e026cc5a4aed3c22a58cbd3d2ac754c9352c5436f638042dca99034e83636516"), - decode_hash("3d04cffd8b46a874edf5cfae63077de85f849a660426697b06a829c70dd1409c"), - decode_hash("ad676aa337a485e4728a0b240d92b3ef7b3c372d06d189322bfd5f61f1e7203e"), - decode_hash("a2fca4a49658f9fab7aa63289c91b7c7b6c832a6d0e69334ff5b0a3483d09dab"), - decode_hash("4ebfd9cd7bca2505f7bef59cc1c12ecc708fff26ae4af19abe852afe9e20c862"), - decode_hash("2def10d13dd169f550f578bda343d9717a138562e0093b380a1120789d53cf10"), - decode_hash("776a31db34a1a0a7caaf862cffdfff1789297ffadc380bd3d39281d340abd3ad"), - decode_hash("e2e7610b87a5fdf3a72ebe271287d923ab990eefac64b6e59d79f8b7e08c46e3"), - decode_hash("504364a5c6858bf98fff714ab5be9de19ed31a976860efbd0e772a2efe23e2e0"), - decode_hash("4f05f4acb83f5b65168d9fef89d56d4d77b8944015e6b1eed81b0238e2d0dba3"), - decode_hash("44a6d974c75b07423e1d6d33f481916fdd45830aea11b6347e700cd8b9f0767c"), - decode_hash("edf260291f734ddac396a956127dde4c34c0cfb8d8052f88ac139658ccf2d507"), - decode_hash("6075c657a105351e7f0fce53bc320113324a522e8fd52dc878c762551e01a46e"), - decode_hash("6ca6a3f763a9395f7da16014725ca7ee17e4815c0ff8119bf33f273dee11833b"), - decode_hash("1c25ef10ffeb3c7d08aa707d17286e0b0d3cbcb50f1bd3b6523b63ba3b52dd0f"), - decode_hash("fffc43bd08273ccf135fd3cacbeef055418e09eb728d727c4d5d5c556cdea7e3"), - decode_hash("c5ab8111456b1f28f3c7a0a604b4553ce905cb019c463ee159137af83c350b22"), - 
decode_hash("0ff273fcbf4ae0f2bd88d6cf319ff4004f8d7dca70d4ced4e74d2c74139739e6"), - decode_hash("7fa06ba11241ddd5efdc65d4e39c9f6991b74fd4b81b62230808216c876f827c"), - decode_hash("7e275adf313a996c7e2950cac67caba02a5ff925ebf9906b58949f3e77aec5b9"), - decode_hash("8f6162fa308d2b3a15dc33cffac85f13ab349173121645aedf00f471663108be"), - decode_hash("78ccaaab73373552f207a63599de54d7d8d0c1805f86ce7da15818d09f4cff62"), - ], - }), - context: context.clone(), - }, - ], - }; - - assert_eq_finish_epoch_response(result, expected_result); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_finish_unexistent_epoch() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let err = grpc_client - .finish_epoch(grpc_client::FinishEpochRequest { - session_id: "rollup session".into(), - active_epoch_index: 10, - processed_input_count_within_epoch: 0, - storage_directory: "".into(), - }) - .await - .unwrap_err(); - assert_eq!(err.code(), tonic::Code::InvalidArgument); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_updates_input_index_after_finishing_epoch() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - let session_id = "rollup session"; - - setup_advance_state(&mut grpc_client, session_id).await; - finish_advance_state(&mut grpc_client, session_id).await; - - grpc_client - .finish_epoch(grpc_client::FinishEpochRequest { - active_epoch_index: 0, - session_id: session_id.into(), - processed_input_count_within_epoch: 1, - storage_directory: "".into(), - }) - .await - .unwrap(); - - grpc_client - .advance_state(grpc_client::create_advance_state_request( - session_id, 1, 1, - )) - .await - .unwrap(); - - http_client::finish("accept".into()).await.unwrap(); - finish_advance_state(&mut grpc_client, session_id).await; - - let epoch_status = grpc_client - .get_epoch_status(grpc_client::GetEpochStatusRequest { - session_id: "rollup session".into(), - epoch_index: 1, - }) - .await - .unwrap() - .into_inner(); - - assert_eq!(epoch_status.processed_inputs[0].input_index, 1); -} - -fn assert_eq_finish_epoch_response( - lhs: FinishEpochResponse, - rhs: FinishEpochResponse, -) { - assert_eq!(lhs.machine_hash, rhs.machine_hash); - assert_eq!(lhs.notices_epoch_root_hash, rhs.notices_epoch_root_hash); - assert_eq!(lhs.vouchers_epoch_root_hash, rhs.vouchers_epoch_root_hash); - assert_eq!(lhs.proofs.len(), rhs.proofs.len()); - - for (idx, proof) in lhs.proofs.into_iter().enumerate() { - assert_eq_proof(proof, rhs.proofs[idx].clone()); - } -} - -fn assert_eq_proof(lhs: grpc_client::Proof, rhs: grpc_client::Proof) { - assert_eq!(lhs.context, rhs.context); - assert_eq!(lhs.input_index, rhs.input_index); - assert_eq!(lhs.output_index, rhs.output_index); - assert_eq!(lhs.output_enum(), rhs.output_enum()); - assert_eq_validity_proof( - lhs.validity.expect("should contain OutputValidityProof"), - rhs.validity.expect("should contain OutputValidityProof"), - ); -} - -fn assert_eq_validity_proof( - lhs: grpc_client::OutputValidityProof, - rhs: grpc_client::OutputValidityProof, -) { - assert_eq!(lhs.input_index_within_epoch, rhs.input_index_within_epoch); - assert_eq!(lhs.output_index_within_input, rhs.output_index_within_input); - assert_eq!(lhs.machine_state_hash, rhs.machine_state_hash); - assert_eq!(lhs.notices_epoch_root_hash, rhs.notices_epoch_root_hash); - 
assert_eq!(lhs.vouchers_epoch_root_hash, rhs.vouchers_epoch_root_hash); - for (h_idx, hash) in lhs - .output_hash_in_output_hashes_siblings - .into_iter() - .enumerate() - { - assert_eq!(hash, rhs.output_hash_in_output_hashes_siblings[h_idx]); - } - for (h_idx, hash) in - lhs.output_hashes_in_epoch_siblings.into_iter().enumerate() - { - assert_eq!(hash, rhs.output_hashes_in_epoch_siblings[h_idx]); - } -} - -fn decode_hash(s: &str) -> grpc_client::Hash { - grpc_client::Hash { - data: hex::decode(s).unwrap(), - } -} diff --git a/offchain/host-runner/tests/grpc_tests/get_epoch_status.rs b/offchain/host-runner/tests/grpc_tests/get_epoch_status.rs deleted file mode 100644 index 849e407bd..000000000 --- a/offchain/host-runner/tests/grpc_tests/get_epoch_status.rs +++ /dev/null @@ -1,118 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_get_epoch_status_of_empty_epoch() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let response = grpc_client - .get_epoch_status(grpc_client::GetEpochStatusRequest { - session_id: "rollup session".into(), - epoch_index: 0, - }) - .await - .unwrap() - .into_inner(); - assert_eq!( - response, - grpc_client::GetEpochStatusResponse { - session_id: "rollup session".into(), - epoch_index: 0, - state: grpc_client::EpochState::Active as i32, - processed_inputs: vec![], - pending_input_count: 0, - taint_status: None, - } - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_get_epoch_status_of_epoch_with_voucher_notice_and_report() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - let destination = String::from("0x") + &"fa".repeat(20); - http_client::insert_voucher(destination, "0xdeadbeef".into()) - .await - .unwrap(); - http_client::insert_notice("0xdeadbeef".into()) - .await - .unwrap(); - http_client::insert_report("0xdeadbeef".into()) - .await - .unwrap(); - finish_advance_state(&mut grpc_client, "rollup session").await; - let response = grpc_client - .get_epoch_status(grpc_client::GetEpochStatusRequest { - session_id: "rollup session".into(), - epoch_index: 0, - }) - .await - .unwrap() - .into_inner(); - let expected = grpc_client::GetEpochStatusResponse { - session_id: "rollup session".into(), - epoch_index: 0, - state: grpc_client::EpochState::Active as i32, - processed_inputs: vec![grpc_client::ProcessedInput { - input_index: 0, - reports: vec![grpc_client::Report { - payload: vec![222, 173, 190, 239], - }], - status: grpc_client::CompletionStatus::Accepted as i32, - processed_input_one_of: Some( - grpc_client::processed_input::ProcessedInputOneOf::AcceptedData( - grpc_client::AcceptedData { - vouchers: vec![grpc_client::Voucher { - destination: Some(grpc_client::Address { - data: vec![ - 250, 250, 250, 250, 250, 250, 250, 250, - 250, 250, 250, 250, 250, 250, 250, 250, - 250, 250, 250, 250, - ], - }), - payload: vec![222, 173, 190, 239], - }], - notices: vec![grpc_client::Notice { - payload: vec![222, 173, 190, 239], - }], - }, - ), - ), - }], - pending_input_count: 0, - taint_status: None, - }; - assert_eq!(response, expected); -} - -#[tokio::test] -#[serial_test::serial] -async fn 
test_it_fails_to_get_non_existent_epoch_status() {
-    let _manager = manager::Wrapper::new().await;
-    let mut grpc_client = grpc_client::connect().await;
-    grpc_client
-        .start_session(grpc_client::create_start_session_request(
-            "rollup session",
-        ))
-        .await
-        .unwrap();
-    let err = grpc_client
-        .get_epoch_status(grpc_client::GetEpochStatusRequest {
-            session_id: "rollup session".into(),
-            epoch_index: 123,
-        })
-        .await
-        .unwrap_err();
-    assert_eq!(err.code(), tonic::Code::InvalidArgument);
-}
diff --git a/offchain/host-runner/tests/grpc_tests/get_session_status.rs b/offchain/host-runner/tests/grpc_tests/get_session_status.rs
deleted file mode 100644
index 0dfd2a07b..000000000
--- a/offchain/host-runner/tests/grpc_tests/get_session_status.rs
+++ /dev/null
@@ -1,88 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use crate::common::*;
-
-#[tokio::test]
-#[serial_test::serial]
-async fn test_it_gets_session_status_with_no_advance_requests() {
-    let _manager = manager::Wrapper::new().await;
-    let mut grpc_client = grpc_client::connect().await;
-    grpc_client
-        .start_session(grpc_client::create_start_session_request(
-            "rollup session",
-        ))
-        .await
-        .unwrap();
-    let response = grpc_client
-        .get_session_status(grpc_client::GetSessionStatusRequest {
-            session_id: "rollup session".into(),
-        })
-        .await
-        .unwrap()
-        .into_inner();
-    assert_eq!(
-        response,
-        grpc_client::GetSessionStatusResponse {
-            session_id: "rollup session".into(),
-            active_epoch_index: 0,
-            epoch_index: vec![0],
-            taint_status: None,
-        }
-    )
-}
-
-#[tokio::test]
-#[serial_test::serial]
-async fn test_it_gets_session_with_multiple_epochs() {
-    let _manager = manager::Wrapper::new().await;
-    let mut grpc_client = grpc_client::connect().await;
-    grpc_client
-        .start_session(grpc_client::create_start_session_request(
-            "rollup session",
-        ))
-        .await
-        .unwrap();
-    const N: u64 = 10;
-    for i in 0..N {
-        grpc_client
-            .finish_epoch(grpc_client::FinishEpochRequest {
-                session_id: "rollup session".into(),
-                active_epoch_index: i,
-                processed_input_count_within_epoch: 0,
-                storage_directory: "".into(),
-            })
-            .await
-            .unwrap();
-    }
-    let response = grpc_client
-        .get_session_status(grpc_client::GetSessionStatusRequest {
-            session_id: "rollup session".into(),
-        })
-        .await
-        .unwrap()
-        .into_inner();
-    assert_eq!(
-        response,
-        grpc_client::GetSessionStatusResponse {
-            session_id: "rollup session".into(),
-            active_epoch_index: N,
-            epoch_index: (0..N + 1).collect::<Vec<u64>>(),
-            taint_status: None,
-        }
-    )
-}
-
-#[tokio::test]
-#[serial_test::serial]
-async fn test_it_fails_to_get_session_status_of_unexistent_session() {
-    let _manager = manager::Wrapper::new().await;
-    let mut grpc_client = grpc_client::connect().await;
-    let err = grpc_client
-        .get_session_status(grpc_client::GetSessionStatusRequest {
-            session_id: "rollup session".into(),
-        })
-        .await
-        .unwrap_err();
-    assert_eq!(err.code(), tonic::Code::InvalidArgument);
-}
diff --git a/offchain/host-runner/tests/grpc_tests/get_status.rs b/offchain/host-runner/tests/grpc_tests/get_status.rs
deleted file mode 100644
index 322391d2a..000000000
--- a/offchain/host-runner/tests/grpc_tests/get_status.rs
+++ /dev/null
@@ -1,44 +0,0 @@
-// (c) Cartesi and individual authors (see AUTHORS)
-// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
-
-use crate::common::*;
-
-#[tokio::test]
-#[serial_test::serial]
-async fn test_it_gets_status_with_no_sessions_running() {
-    let _manager = manager::Wrapper::new().await;
-
let mut grpc_client = grpc_client::connect().await; - let response = grpc_client - .get_status(grpc_client::Void {}) - .await - .unwrap() - .into_inner(); - assert_eq!( - response, - grpc_client::GetStatusResponse { session_id: vec![] } - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_gets_status_with_a_single_session_running() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let response = grpc_client - .get_status(grpc_client::Void {}) - .await - .unwrap() - .into_inner(); - assert_eq!( - response, - grpc_client::GetStatusResponse { - session_id: vec![String::from("rollup session")] - } - ); -} diff --git a/offchain/host-runner/tests/grpc_tests/get_version.rs b/offchain/host-runner/tests/grpc_tests/get_version.rs deleted file mode 100644 index f8d9f0a54..000000000 --- a/offchain/host-runner/tests/grpc_tests/get_version.rs +++ /dev/null @@ -1,29 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; -use grpc_interfaces::versioning::{GetVersionResponse, SemanticVersion}; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_gets_version() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - let response = grpc_client - .get_version(grpc_client::Void {}) - .await - .unwrap() - .into_inner(); - assert_eq!( - response, - GetVersionResponse { - version: Some(SemanticVersion { - major: 0, - minor: 2, - patch: 0, - pre_release: String::from(""), - build: String::from("host-runner"), - }) - } - ); -} diff --git a/offchain/host-runner/tests/grpc_tests/inspect_state.rs b/offchain/host-runner/tests/grpc_tests/inspect_state.rs deleted file mode 100644 index fcb36623e..000000000 --- a/offchain/host-runner/tests/grpc_tests/inspect_state.rs +++ /dev/null @@ -1,169 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_inspects_and_receive_a_report() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - // Send the inspect request in a separate thread - let inspect_handle = tokio::spawn(async move { - grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload: create_payload(), - }) - .await - .unwrap() - .into_inner() - }); - // Send HTTP requests - while let Err(e) = http_client::finish("accept".into()).await { - assert_eq!(e.status, 202); - } - let payload = create_payload(); - http_client::insert_report(http_client::convert_binary_to_hex(&payload)) - .await - .unwrap(); - http_client::finish("accept".into()).await.unwrap_err(); - // Obtain the inspect response and check it - let response = inspect_handle.await.unwrap(); - let expected = grpc_client::InspectStateResponse { - session_id: String::from("rollup session"), - active_epoch_index: 0, - processed_input_count: 0, - status: grpc_client::CompletionStatus::Accepted as i32, - exception_data: None, - reports: vec![grpc_client::Report { payload }], - }; - assert_eq!(response, expected); -} - -#[tokio::test] -#[serial_test::serial] -async fn 
test_it_reports_session_state_correctly() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - - // Send an input and finish the first epoch - setup_advance_state(&mut grpc_client, "rollup session").await; - finish_advance_state(&mut grpc_client, "rollup session").await; - grpc_client - .finish_epoch(grpc_client::FinishEpochRequest { - session_id: "rollup session".into(), - active_epoch_index: 0, - processed_input_count_within_epoch: 1, - storage_directory: "".into(), - }) - .await - .expect("should finish epoch"); - - // Send an inspect request in the second epoch - let inspect_handle = tokio::spawn(async move { - grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload: create_payload(), - }) - .await - .unwrap() - .into_inner() - }); - - // Get the inspect state request - http_client::finish("accept".into()).await.unwrap(); - - // Accept the inspect request - http_client::finish("accept".into()).await.unwrap_err(); - - let response = inspect_handle.await.unwrap(); - let expected = grpc_client::InspectStateResponse { - session_id: String::from("rollup session"), - active_epoch_index: 1, - processed_input_count: 1, - status: grpc_client::CompletionStatus::Accepted as i32, - exception_data: None, - reports: vec![], - }; - assert_eq!(response, expected); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_inspect_state_concurrently() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - // Send the inspect request in a separate thread - let inspect_handle = { - let mut grpc_client = grpc_client.clone(); - tokio::spawn(async move { - grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload: create_payload(), - }) - .await - .unwrap() - .into_inner() - }) - }; - // Wait until the first request starts to be processed - while let Err(e) = http_client::finish("accept".into()).await { - assert_eq!(e.status, 202); - } - // Send second inspect request - let status = grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload: create_payload(), - }) - .await - .unwrap_err(); - assert_eq!(status.code(), tonic::Code::Aborted); - assert_eq!(status.message(), "concurrent call in session"); - // Finish first inspect request - http_client::finish("accept".into()).await.unwrap_err(); - inspect_handle.await.unwrap(); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_queue_inspect_during_advance_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - // Start advance state - setup_advance_state(&mut grpc_client, "rollup session").await; - // Send the inspect request in a separate thread - let inspect_handle = { - let mut grpc_client = grpc_client.clone(); - tokio::spawn(async move { - grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload: create_payload(), - }) - .await - .unwrap() - .into_inner() - }) - }; - // Finish advance request and start inspect request - http_client::finish("accept".into()).await.unwrap(); - // Finish inspect request - http_client::finish("accept".into()).await.unwrap_err(); - inspect_handle.await.unwrap(); -} diff --git 
a/offchain/host-runner/tests/grpc_tests/start_session.rs b/offchain/host-runner/tests/grpc_tests/start_session.rs deleted file mode 100644 index ebfeaaa3c..000000000 --- a/offchain/host-runner/tests/grpc_tests/start_session.rs +++ /dev/null @@ -1,60 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_starts_session_with_new_valid_session() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - let response = grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap() - .into_inner(); - assert_eq!(response, grpc_client::StartSessionResponse { config: None }); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_start_session_with_same_name() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let err = grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap_err(); - assert_eq!(err.code(), tonic::Code::AlreadyExists); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_start_multiple_sessions_because_it_supports_only_one() -{ - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session 1", - )) - .await - .unwrap(); - let err = grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session 2", - )) - .await - .unwrap_err(); - assert_eq!(err.code(), tonic::Code::AlreadyExists); -} diff --git a/offchain/host-runner/tests/http.rs b/offchain/host-runner/tests/http.rs deleted file mode 100644 index 4ada7a06c..000000000 --- a/offchain/host-runner/tests/http.rs +++ /dev/null @@ -1,12 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod common; - -mod http_tests { - mod exception; - mod finish; - mod notice; - mod report; - mod voucher; -} diff --git a/offchain/host-runner/tests/http_tests/exception.rs b/offchain/host-runner/tests/http_tests/exception.rs deleted file mode 100644 index 123a1c023..000000000 --- a/offchain/host-runner/tests/http_tests/exception.rs +++ /dev/null @@ -1,98 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_notifies_exception_during_advance_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - let payload = create_payload(); - http_client::notify_exception(http_client::convert_binary_to_hex(&payload)) - .await - .unwrap(); - let processed = finish_advance_state(&mut grpc_client, "rollup session") - .await - .unwrap(); - match processed.processed_input_one_of.unwrap() { - grpc_client::processed_input::ProcessedInputOneOf::AcceptedData(_) => { - panic!("unexpected advance result"); - } - grpc_client::processed_input::ProcessedInputOneOf::ExceptionData( - result, - ) => { - assert_eq!(result, payload); - } - } -} - -#[tokio::test] -#[serial_test::serial] -async 
fn test_it_notifies_exception_during_inspect_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let inspect_handle = tokio::spawn(async move { - grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload: create_payload(), - }) - .await - .unwrap() - .into_inner() - }); - http_client::finish("accept".into()).await.unwrap(); - let payload = create_payload(); - http_client::notify_exception(http_client::convert_binary_to_hex(&payload)) - .await - .unwrap(); - let response = inspect_handle.await.unwrap(); - let expected = grpc_client::InspectStateResponse { - session_id: String::from("rollup session"), - active_epoch_index: 0, - processed_input_count: 0, - status: grpc_client::CompletionStatus::Exception as i32, - exception_data: Some(payload), - reports: vec![], - }; - assert_eq!(response, expected); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_notify_exception_with_incorrect_data() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - let response = http_client::notify_exception("deadbeef".into()).await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "Failed to decode ethereum binary string deadbeef (expected 0x prefix)".into(), - }) - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_notify_exception_during_idle_state() { - let _manager = manager::Wrapper::new().await; - let response = - http_client::notify_exception(http_client::create_payload()).await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "invalid request exception in idle state".into(), - }) - ); -} diff --git a/offchain/host-runner/tests/http_tests/finish.rs b/offchain/host-runner/tests/http_tests/finish.rs deleted file mode 100644 index 8a9e7867e..000000000 --- a/offchain/host-runner/tests/http_tests/finish.rs +++ /dev/null @@ -1,179 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_finishes_after_advance_request() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - // Perform the advance request - let advance_request = - grpc_client::create_advance_state_request("rollup session", 0, 0); - grpc_client - .advance_state(advance_request.clone()) - .await - .unwrap(); - // Then perform the finish request - let response = http_client::finish("accept".into()).await.unwrap(); - // Then compare the received request with the expected one - let expected_metadata = advance_request.input_metadata.unwrap(); - let expected_sender = expected_metadata.msg_sender.unwrap().data; - assert_eq!( - response, - http_client::RollupHttpRequest::Advance { - data: http_client::AdvanceRequest { - metadata: http_client::AdvanceMetadata { - msg_sender: String::from("0x") - + &hex::encode(&expected_sender), - epoch_index: expected_metadata.epoch_index, - input_index: expected_metadata.input_index, - block_number: expected_metadata.block_number, - timestamp: 
expected_metadata.timestamp, - }, - payload: String::from("0x") - + &hex::encode(&advance_request.input_payload), - } - } - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_finishes_after_inspect_request() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let query_payload = create_payload(); - let payload = http_client::convert_binary_to_hex(&query_payload); - // Perform the inspect request in another thread because it is blocking - tokio::spawn(async move { - grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload, - }) - .await - .unwrap() - .into_inner() - }); - // Then perform the finish request - let response = http_client::finish("accept".into()).await.unwrap(); - // Then compare the received request with the expected one - assert_eq!( - response, - http_client::RollupHttpRequest::Inspect { - data: http_client::InspectRequest { payload } - } - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_finishes_before_advance_request() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let finish_handler = tokio::spawn(http_client::finish("accept".into())); - // Wait for a bit before sending advance request - tokio::time::sleep(std::time::Duration::from_millis(50)).await; - grpc_client - .advance_state(grpc_client::create_advance_state_request( - "rollup session", - 0, - 0, - )) - .await - .unwrap(); - // Then receive and compare finish response - let response = finish_handler.await.unwrap(); - assert!(matches!( - response, - Ok(http_client::RollupHttpRequest::Advance { .. 
}) - )); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_finishes_current_advance_request() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - finish_advance_state(&mut grpc_client, "rollup session") - .await - .unwrap(); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_handles_finish_while_waiting_for_rollup_request() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - // Perform the first finish call in another thread - let first_handler = tokio::spawn(http_client::finish("accept".into())); - // Wait for a bit and perform another finish call before the previous one returned - tokio::time::sleep(std::time::Duration::from_millis(10)).await; - // Check the second finish response - let err = http_client::finish("accept".into()).await.unwrap_err(); - assert_eq!( - err, - http_client::HttpError { - status: 202, - message: String::from("no rollup request available"), - } - ); - // Check the first finish response - let err = first_handler.await.unwrap().unwrap_err(); - assert_eq!( - err, - http_client::HttpError { - status: 202, - message: String::from("no rollup request available"), - } - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_times_out_finish_request() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - // Perform the finish request and wait until it times out - let err = http_client::finish("accept".into()).await.unwrap_err(); - assert_eq!( - err, - http_client::HttpError { - status: 202, - message: String::from("no rollup request available"), - } - ); -} diff --git a/offchain/host-runner/tests/http_tests/notice.rs b/offchain/host-runner/tests/http_tests/notice.rs deleted file mode 100644 index b047c6d99..000000000 --- a/offchain/host-runner/tests/http_tests/notice.rs +++ /dev/null @@ -1,66 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_insert_notice_during_advance_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - // Send notices - const N: usize = 3; - for i in 0..N { - let result = http_client::insert_notice(http_client::create_payload()) - .await - .unwrap(); - assert_eq!(result.index, i); - } - // Check if notices arrived - let processed = finish_advance_state(&mut grpc_client, "rollup session") - .await - .unwrap(); - match processed.processed_input_one_of.unwrap() { - grpc_client::processed_input::ProcessedInputOneOf::AcceptedData( - result, - ) => { - assert_eq!(result.notices.len(), N); - } - grpc_client::processed_input::ProcessedInputOneOf::ExceptionData(_) => { - panic!("unexpected advance result"); - } - } -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_insert_notice_with_incorrect_data() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - let 
response = http_client::insert_notice("deadbeef".into()).await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "Failed to decode ethereum binary string deadbeef (expected 0x prefix)".into(), - }) - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_insert_notice_during_idle_state() { - let _manager = manager::Wrapper::new().await; - // Don't perform setup on purpose - let response = - http_client::insert_notice(http_client::create_payload()).await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "invalid request notice in idle state".into(), - }) - ); -} diff --git a/offchain/host-runner/tests/http_tests/report.rs b/offchain/host-runner/tests/http_tests/report.rs deleted file mode 100644 index b37e475c6..000000000 --- a/offchain/host-runner/tests/http_tests/report.rs +++ /dev/null @@ -1,92 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_insert_report_during_advance_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - // Send reports - const N: usize = 3; - for _ in 0..N { - http_client::insert_report(http_client::create_payload()) - .await - .unwrap(); - } - // Check if reports arrived - let processed = finish_advance_state(&mut grpc_client, "rollup session") - .await - .unwrap(); - assert_eq!(processed.reports.len(), N); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_insert_report_during_inspect_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - grpc_client - .start_session(grpc_client::create_start_session_request( - "rollup session", - )) - .await - .unwrap(); - let inspect_handle = tokio::spawn(async move { - grpc_client - .inspect_state(grpc_client::InspectStateRequest { - session_id: "rollup session".into(), - query_payload: create_payload(), - }) - .await - .unwrap() - .into_inner() - }); - http_client::finish("accept".into()).await.unwrap(); - // Send reports - const N: usize = 3; - for _ in 0..N { - http_client::insert_report(http_client::create_payload()) - .await - .unwrap(); - } - // Perform final finish call - tokio::spawn(http_client::finish("accept".into())); - // Obtain the inspect result - let response = inspect_handle.await.unwrap(); - assert_eq!(response.reports.len(), N); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_insert_report_with_incorrect_data() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - let response = http_client::insert_report("deadbeef".into()).await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "Failed to decode ethereum binary string deadbeef (expected 0x prefix)".into(), - }) - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_insert_report_during_idle_state() { - let _manager = manager::Wrapper::new().await; - // Don't perform setup on purpose - let response = - http_client::insert_report(http_client::create_payload()).await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "invalid request report in idle state".into(), - }) - ); -} diff --git 
a/offchain/host-runner/tests/http_tests/voucher.rs b/offchain/host-runner/tests/http_tests/voucher.rs deleted file mode 100644 index 693914213..000000000 --- a/offchain/host-runner/tests/http_tests/voucher.rs +++ /dev/null @@ -1,73 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::common::*; - -#[tokio::test] -#[serial_test::serial] -async fn test_it_insert_voucher_during_advance_state() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - // Send vouchers - const N: usize = 3; - for i in 0..N { - let destination = http_client::create_address(); - let payload = http_client::create_payload(); - let result = http_client::insert_voucher(destination, payload) - .await - .unwrap(); - assert_eq!(result.index, i); - } - // Check if vouchers arrived - let processed = finish_advance_state(&mut grpc_client, "rollup session") - .await - .unwrap(); - match processed.processed_input_one_of.unwrap() { - grpc_client::processed_input::ProcessedInputOneOf::AcceptedData( - result, - ) => { - assert_eq!(result.vouchers.len(), N); - } - grpc_client::processed_input::ProcessedInputOneOf::ExceptionData(_) => { - panic!("unexpected advance result"); - } - } -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_insert_voucher_with_incorrect_data() { - let _manager = manager::Wrapper::new().await; - let mut grpc_client = grpc_client::connect().await; - setup_advance_state(&mut grpc_client, "rollup session").await; - let response = http_client::insert_voucher( - http_client::create_address(), - "deadbeef".into(), - ) - .await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "Failed to decode ethereum binary string deadbeef (expected 0x prefix)".into(), - }) - ); -} - -#[tokio::test] -#[serial_test::serial] -async fn test_it_fails_to_insert_voucher_during_idle_state() { - let _manager = manager::Wrapper::new().await; - // Don't perform setup on purpose - let destination = http_client::create_address(); - let payload = http_client::create_payload(); - let response = http_client::insert_voucher(destination, payload).await; - assert_eq!( - response, - Err(http_client::HttpError { - status: 400, - message: "invalid request voucher in idle state".into(), - }) - ); -} diff --git a/offchain/http-health-check/Cargo.toml b/offchain/http-health-check/Cargo.toml deleted file mode 100644 index 2fd439148..000000000 --- a/offchain/http-health-check/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "http-health-check" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -axum.workspace = true -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } -tracing.workspace = true diff --git a/offchain/http-health-check/README.md b/offchain/http-health-check/README.md deleted file mode 100644 index ad5e2d30e..000000000 --- a/offchain/http-health-check/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# HTTP Healthcheck - -This crate is a library that starts an HTTP server with the healthcheck endpoint. 
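For reference, a minimal sketch of how a service consumed this crate before removal: `run_with_healthcheck` and its body are hypothetical placeholders, but the `http_health_check::start` call and the `tokio::select!` wiring mirror the indexer's `run` function later in this patch.

    async fn run_with_healthcheck(port: u16) -> Result<(), Box<dyn std::error::Error>> {
        // Serves GET /healthz on 0.0.0.0:<port> until the server fails.
        let health_handle = http_health_check::start(port);
        // Placeholder for the service's actual main loop.
        let main_handle = async { Ok::<(), Box<dyn std::error::Error>>(()) };
        tokio::select! {
            ret = health_handle => ret.map_err(Into::into),
            ret = main_handle => ret,
        }
    }
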
diff --git a/offchain/http-health-check/src/lib.rs b/offchain/http-health-check/src/lib.rs deleted file mode 100644 index fcf45c6f5..000000000 --- a/offchain/http-health-check/src/lib.rs +++ /dev/null @@ -1,31 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use axum::{routing::get, Router}; -use snafu::{ResultExt, Snafu}; -use std::net::SocketAddr; - -#[derive(Debug, Snafu)] -pub enum HealthCheckError { - #[snafu(display("could not parse host address"))] - ParseAddressError { source: std::net::AddrParseError }, - - #[snafu(display("http health-check server error"))] - HttpServerError { source: std::io::Error }, -} - -#[tracing::instrument(level = "trace", skip_all)] -pub async fn start(port: u16) -> Result<(), HealthCheckError> { - tracing::trace!(?port, "starting health-check server on this port"); - - let ip = "0.0.0.0".parse().context(ParseAddressSnafu)?; - let addr = SocketAddr::new(ip, port); - let app = Router::new().route("/healthz", get(|| async { "" })); - let listener = tokio::net::TcpListener::bind(&addr) - .await - .context(HttpServerSnafu)?; - - tracing::trace!(address = ?listener.local_addr(), "http healthcheck address bound"); - - axum::serve(listener, app).await.context(HttpServerSnafu) -} diff --git a/offchain/http-server/Cargo.toml b/offchain/http-server/Cargo.toml deleted file mode 100644 index 9a8cb4faa..000000000 --- a/offchain/http-server/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "http-server" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -axum.workspace = true -clap = { workspace = true, features = ["derive", "env", "string"] } -hyper.workspace = true -prometheus-client.workspace = true -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } -tracing.workspace = true diff --git a/offchain/http-server/src/config.rs b/offchain/http-server/src/config.rs deleted file mode 100644 index 7a32ac82d..000000000 --- a/offchain/http-server/src/config.rs +++ /dev/null @@ -1,45 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::{ - value_parser, Arg, Command, CommandFactory, FromArgMatches, Parser, -}; - -#[derive(Debug, Clone, Parser)] -pub struct HttpServerConfig { - pub(crate) port: u16, -} - -impl HttpServerConfig { - /// Returns the HTTP server config and the app's config after parsing - /// it from the command line and/or environment variables. - /// - /// The parameter `service` must be a lowercase string that - /// uses underscores instead of spaces. - /// - /// The parametric type `C` must be a struct that derives `Parser`. 
- pub fn parse<C: Parser>( - service: &'static str, - ) -> (HttpServerConfig, C) { - let command = <C as CommandFactory>::command(); - let command = add_port_arg(command, service); - - let matches = command.get_matches(); - let http_server_config: HttpServerConfig = - FromArgMatches::from_arg_matches(&matches).unwrap(); - let inner_config: C = - FromArgMatches::from_arg_matches(&matches).unwrap(); - (http_server_config, inner_config) - } -} - -fn add_port_arg<S: ToString>(command: Command, service: S) -> Command { - let service = service.to_string().to_uppercase(); - command.arg( - Arg::new("port") - .long("http-server-port") - .env(format!("{}_HTTP_SERVER_PORT", service)) - .value_parser(value_parser!(u16)) - .default_value("8080"), - ) -} diff --git a/offchain/indexer/Cargo.toml b/offchain/indexer/Cargo.toml deleted file mode 100644 index d3d8d524e..000000000 --- a/offchain/indexer/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "indexer" -edition.workspace = true -license.workspace = true -version.workspace = true - -[[bin]] -name = "cartesi-rollups-indexer" -path = "src/main.rs" -test = false - -[dependencies] -http-health-check = { path = "../http-health-check" } -log = { path = "../log" } -rollups-data = { path = "../data" } -rollups-events = { path = "../rollups-events" } - -clap = { workspace = true, features = ["derive", "env"] } -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } -tracing.workspace = true - -[dev-dependencies] -test-fixtures = { path = "../test-fixtures" } - -backoff.workspace = true -env_logger.workspace = true -rand.workspace = true -serial_test.workspace = true -test-log = { workspace = true, features = ["trace"] } -testcontainers.workspace = true -tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/offchain/indexer/README.md b/offchain/indexer/README.md deleted file mode 100644 index ef595dc31..000000000 --- a/offchain/indexer/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# Indexer - -This service is responsible for inserting Rollups inputs and outputs in the PostgreSQL database. -The indexer consumes the inputs and the outputs from the rollups broker. 
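Stepping back to the deleted http-server crate above: a sketch of the calling convention its `HttpServerConfig::parse` expected. `MyServiceConfig` and `load_config` are illustrative names, not from this repository; the struct derives clap's `Parser` as the doc comment requires.

    #[derive(Debug, Clone, clap::Parser)]
    struct MyServiceConfig {
        #[arg(long, env)]
        some_option: Option<String>,
    }

    fn load_config() -> (HttpServerConfig, MyServiceConfig) {
        // "my_service" makes the port flag read MY_SERVICE_HTTP_SERVER_PORT
        // from the environment, defaulting to 8080 (see add_port_arg above).
        HttpServerConfig::parse::<MyServiceConfig>("my_service")
    }
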
diff --git a/offchain/indexer/src/config.rs b/offchain/indexer/src/config.rs deleted file mode 100644 index 747bec999..000000000 --- a/offchain/indexer/src/config.rs +++ /dev/null @@ -1,56 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; - -use log::{LogConfig, LogEnvCliConfig}; -pub use rollups_data::{RepositoryCLIConfig, RepositoryConfig}; -pub use rollups_events::{ - BrokerCLIConfig, BrokerConfig, DAppMetadata, DAppMetadataCLIConfig, -}; - -#[derive(Debug)] -pub struct IndexerConfig { - pub repository_config: RepositoryConfig, - pub dapp_metadata: DAppMetadata, - pub broker_config: BrokerConfig, - pub log_config: LogConfig, - pub healthcheck_port: u16, -} - -#[derive(Parser)] -#[command(name = "indexer_config")] -#[command(about = "Configuration for indexer")] -pub struct CLIConfig { - #[command(flatten)] - repository_config: RepositoryCLIConfig, - - #[command(flatten)] - dapp_metadata_config: DAppMetadataCLIConfig, - - #[command(flatten)] - broker_config: BrokerCLIConfig, - - #[command(flatten)] - pub log_config: LogEnvCliConfig, - - /// Port of health check - #[arg( - long = "healthcheck-port", - env = "INDEXER_HEALTHCHECK_PORT", - default_value_t = 8080 - )] - pub healthcheck_port: u16, -} - -impl From<CLIConfig> for IndexerConfig { - fn from(cli_config: CLIConfig) -> Self { - Self { - repository_config: cli_config.repository_config.into(), - dapp_metadata: cli_config.dapp_metadata_config.into(), - broker_config: cli_config.broker_config.into(), - log_config: cli_config.log_config.into(), - healthcheck_port: cli_config.healthcheck_port, - } - } -} diff --git a/offchain/indexer/src/conversions.rs b/offchain/indexer/src/conversions.rs deleted file mode 100644 index 1b579c428..000000000 --- a/offchain/indexer/src/conversions.rs +++ /dev/null @@ -1,126 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -//! Convert from rollups-events types to rollups-data types. -//! This code cannot use the From trait because both types are defined in -//! external crates. 
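The module comment above refers to Rust's orphan rule: a foreign trait (`From`) cannot be implemented for a foreign type (such as `rollups_data::Input`) outside the crate that defines one of them, hence the free conversion functions below. For comparison, a sketch of the usual newtype workaround, with a hypothetical `InputWrapper` that is not part of this codebase:

    struct InputWrapper(rollups_data::Input);

    impl From<rollups_events::RollupsAdvanceStateInput> for InputWrapper {
        fn from(input: rollups_events::RollupsAdvanceStateInput) -> Self {
            // Delegates to the free conversion function defined below.
            InputWrapper(convert_input(input))
        }
    }
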
-use std::time::{Duration, UNIX_EPOCH}; - -use rollups_events::{ - RollupsAdvanceStateInput, RollupsCompletionStatus, RollupsNotice, - RollupsOutputEnum, RollupsProof, RollupsReport, RollupsVoucher, -}; - -use rollups_data::{ - CompletionStatus, Input, Notice, OutputEnum, Proof, Report, Voucher, -}; - -pub fn convert_status(status: RollupsCompletionStatus) -> CompletionStatus { - match status { - RollupsCompletionStatus::Accepted => CompletionStatus::Accepted, - RollupsCompletionStatus::Rejected => CompletionStatus::Rejected, - RollupsCompletionStatus::Exception => CompletionStatus::Exception, - RollupsCompletionStatus::MachineHalted => { - CompletionStatus::MachineHalted - } - RollupsCompletionStatus::CycleLimitExceeded => { - CompletionStatus::CycleLimitExceeded - } - RollupsCompletionStatus::TimeLimitExceeded => { - CompletionStatus::TimeLimitExceeded - } - RollupsCompletionStatus::PayloadLengthLimitExceeded => { - CompletionStatus::PayloadLengthLimitExceeded - } - } -} - -pub fn convert_input(input: RollupsAdvanceStateInput) -> Input { - let timestamp = UNIX_EPOCH + Duration::from_secs(input.metadata.timestamp); - Input { - index: input.metadata.input_index as i32, - msg_sender: input.metadata.msg_sender.into_inner().into(), - tx_hash: input.tx_hash.into_inner().into(), - block_number: input.metadata.block_number as i64, - timestamp, - payload: input.payload.into_inner(), - status: CompletionStatus::Unprocessed, - } -} - -pub fn convert_voucher(voucher: RollupsVoucher) -> Voucher { - Voucher { - input_index: voucher.input_index as i32, - index: voucher.index as i32, - destination: voucher.destination.into_inner().into(), - payload: voucher.payload.into_inner(), - } -} - -pub fn convert_notice(notice: RollupsNotice) -> Notice { - Notice { - input_index: notice.input_index as i32, - index: notice.index as i32, - payload: notice.payload.into_inner(), - } -} - -pub fn convert_report(report: RollupsReport) -> Report { - Report { - input_index: report.input_index as i32, - index: report.index as i32, - payload: report.payload.into_inner(), - } -} - -pub fn convert_proof(proof: RollupsProof) -> Proof { - Proof { - input_index: proof.input_index as i32, - output_index: proof.output_index as i32, - output_enum: match proof.output_enum { - RollupsOutputEnum::Voucher => OutputEnum::Voucher, - RollupsOutputEnum::Notice => OutputEnum::Notice, - }, - validity_input_index_within_epoch: proof - .validity - .input_index_within_epoch - as i32, - validity_output_index_within_input: proof - .validity - .output_index_within_input - as i32, - validity_output_hashes_root_hash: proof - .validity - .output_hashes_root_hash - .into_inner() - .into(), - validity_vouchers_epoch_root_hash: proof - .validity - .vouchers_epoch_root_hash - .into_inner() - .into(), - validity_notices_epoch_root_hash: proof - .validity - .notices_epoch_root_hash - .into_inner() - .into(), - validity_machine_state_hash: proof - .validity - .machine_state_hash - .into_inner() - .into(), - validity_output_hash_in_output_hashes_siblings: proof - .validity - .output_hash_in_output_hashes_siblings - .into_iter() - .map(|hash| Some(hash.into_inner().into())) - .collect(), - validity_output_hashes_in_epoch_siblings: proof - .validity - .output_hashes_in_epoch_siblings - .into_iter() - .map(|hash| Some(hash.into_inner().into())) - .collect(), - context: proof.context.into_inner(), - } -} diff --git a/offchain/indexer/src/error.rs b/offchain/indexer/src/error.rs deleted file mode 100644 index 9fad2bf3e..000000000 --- 
a/offchain/indexer/src/error.rs +++ /dev/null @@ -1,27 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use snafu::Snafu; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -pub enum IndexerError { - #[snafu(display("health check error"))] - HealthCheckError { - source: http_health_check::HealthCheckError, - }, - - #[snafu(display("broker error"))] - BrokerError { source: rollups_events::BrokerError }, - - #[snafu(display("migrations error"))] - MigrationsError { - source: rollups_data::MigrationError, - }, - - #[snafu(display("repository error"))] - RepositoryError { source: rollups_data::Error }, - - #[snafu(display("join error"))] - JoinError { source: tokio::task::JoinError }, -} diff --git a/offchain/indexer/src/indexer.rs b/offchain/indexer/src/indexer.rs deleted file mode 100644 index 47dc38ed7..000000000 --- a/offchain/indexer/src/indexer.rs +++ /dev/null @@ -1,130 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use rollups_data::Repository; -use rollups_events::indexer::{IndexerEvent, IndexerState}; -use rollups_events::{ - Broker, BrokerError, RollupsData, RollupsInput, RollupsOutput, -}; -use snafu::ResultExt; - -use crate::conversions::*; -use crate::error::{ - BrokerSnafu, IndexerError, JoinSnafu, MigrationsSnafu, RepositorySnafu, -}; -use crate::IndexerConfig; - -pub struct Indexer { - repository: Repository, - broker: Broker, - state: IndexerState, -} - -impl Indexer { - #[tracing::instrument(level = "trace", skip_all)] - pub async fn start(config: IndexerConfig) -> Result<(), IndexerError> { - tracing::info!("running database migrations"); - let endpoint = config.repository_config.endpoint(); - rollups_data::run_migrations(&endpoint).context(MigrationsSnafu)?; - - tracing::info!("ran migrations; connecting to DB"); - let repository = tokio::task::spawn_blocking(|| { - Repository::new(config.repository_config) - }) - .await - .context(JoinSnafu)? - .context(RepositorySnafu)?; - - tracing::info!("connected to database; connecting to broker"); - let broker = Broker::new(config.broker_config) - .await - .context(BrokerSnafu)?; - - let state = IndexerState::new(&config.dapp_metadata); - let mut indexer = Indexer { - repository, - broker, - state, - }; - - tracing::info!("connected to broker; starting main loop"); - loop { - let event = indexer.consume_event().await?; - let repository = indexer.repository.clone(); - tokio::task::spawn_blocking(move || match event { - IndexerEvent::Input(input) => { - store_input(&repository, input.payload) - } - IndexerEvent::Output(output) => { - store_output(&repository, output.payload) - } - }) - .await - .context(JoinSnafu)? 
- .context(RepositorySnafu)?; - } - } - - #[tracing::instrument(level = "trace", skip_all)] - async fn consume_event(&mut self) -> Result<IndexerEvent, IndexerError> { - tracing::info!(?self.state, "waiting for next event"); - loop { - match self.broker.indexer_consume(&mut self.state).await { - Ok(event) => { - tracing::info!(?event, "received event"); - return Ok(event); - } - Err(source) => match source { - BrokerError::ConsumeTimeout => { - tracing::trace!("broker timed out, trying again"); - continue; - } - _ => { - return Err(IndexerError::BrokerError { source }); - } - }, - } - } - } -} - -#[tracing::instrument(level = "trace", skip_all)] -fn store_input( - repository: &Repository, - input: RollupsInput, -) -> Result<(), rollups_data::Error> { - match input.data { - RollupsData::AdvanceStateInput(input) => { - repository.insert_input(convert_input(input)) - } - RollupsData::FinishEpoch {} => { - tracing::trace!("ignoring finish epoch"); - Ok(()) - } - } -} - -#[tracing::instrument(level = "trace", skip_all)] -fn store_output( - repository: &Repository, - output: RollupsOutput, -) -> Result<(), rollups_data::Error> { - match output { - RollupsOutput::AdvanceResult(result) => repository.update_input_status( - result.input_index as i32, - convert_status(result.status), - ), - RollupsOutput::Voucher(voucher) => { - repository.insert_voucher(convert_voucher(voucher)) - } - RollupsOutput::Notice(notice) => { - repository.insert_notice(convert_notice(notice)) - } - RollupsOutput::Report(report) => { - repository.insert_report(convert_report(report)) - } - RollupsOutput::Proof(proof) => { - repository.insert_proof(convert_proof(proof)) - } - } -} diff --git a/offchain/indexer/src/lib.rs b/offchain/indexer/src/lib.rs deleted file mode 100644 index f8e8cf4d2..000000000 --- a/offchain/indexer/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use snafu::ResultExt; - -pub use config::{CLIConfig, IndexerConfig}; -pub use error::IndexerError; - -pub mod config; -mod conversions; -mod error; -mod indexer; - -#[tracing::instrument(level = "trace", skip_all)] -pub async fn run(config: IndexerConfig) -> Result<(), IndexerError> { - let health_handle = http_health_check::start(config.healthcheck_port); - let indexer_handle = indexer::Indexer::start(config); - tokio::select! 
{ - ret = health_handle => { - ret.context(error::HealthCheckSnafu) - } - ret = indexer_handle => { - ret - } - } -} diff --git a/offchain/indexer/src/main.rs b/offchain/indexer/src/main.rs deleted file mode 100644 index 4ca5f6955..000000000 --- a/offchain/indexer/src/main.rs +++ /dev/null @@ -1,17 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; - -use indexer::{CLIConfig, IndexerConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box<dyn std::error::Error>> { - let config: IndexerConfig = CLIConfig::parse().into(); - - log::configure(&config.log_config); - - log::log_service_start(&config, "Indexer"); - - indexer::run(config).await.map_err(|e| e.into()) -} diff --git a/offchain/indexer/tests/integration.rs b/offchain/indexer/tests/integration.rs deleted file mode 100644 index 7b8a51bd8..000000000 --- a/offchain/indexer/tests/integration.rs +++ /dev/null @@ -1,543 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use indexer::IndexerError; -use log::LogConfig; -use rand::Rng; -use rollups_data::{ - Input, Notice, OutputEnum, Proof, Report, RepositoryConfig, Voucher, -}; -use rollups_events::{ - BrokerConfig, BrokerEndpoint, DAppMetadata, InputMetadata, - RollupsAdvanceStateInput, RollupsData, RollupsNotice, RollupsOutput, - RollupsOutputEnum, RollupsOutputValidityProof, RollupsProof, RollupsReport, - RollupsVoucher, -}; -use serial_test::serial; -use std::time::UNIX_EPOCH; -use test_fixtures::{BrokerFixture, RepositoryFixture}; -use testcontainers::clients::Cli; -use tokio::task::JoinHandle; - -const BROKER_CONSUME_TIMEOUT: usize = 100; - -/// Starts one container with the broker, one container with the database, -/// and the indexer in a background thread. 
-struct TestState<'d> { - broker: BrokerFixture<'d>, - repository: RepositoryFixture<'d>, - indexer: JoinHandle<Result<(), IndexerError>>, -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_inserts_inputs() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: u64 = 3; - let mut inputs = vec![]; - for i in 0..N { - let input = state.produce_input_in_broker(i).await; - inputs.push(input); - } - - for input_sent in inputs.into_iter() { - let input_read = state.get_input_from_database(&input_sent).await; - assert_input_eq(&input_sent, &input_read); - } -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_inserts_vouchers() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: u64 = 3; - let mut vouchers = vec![]; - for i in 0..N { - state.produce_input_in_broker(i).await; - for j in 0..N { - let voucher = state.produce_voucher_in_broker(i, j).await; - vouchers.push(voucher) - } - } - - for voucher_sent in vouchers.into_iter() { - let voucher_read = state.get_voucher_from_database(&voucher_sent).await; - assert_voucher_eq(&voucher_sent, &voucher_read); - } -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_inserts_notices() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: u64 = 3; - let mut notices = vec![]; - for i in 0..N { - state.produce_input_in_broker(i).await; - for j in 0..N { - let notice = state.produce_notice_in_broker(i, j).await; - notices.push(notice); - } - } - - for notice_sent in notices.into_iter() { - let notice_read = state.get_notice_from_database(&notice_sent).await; - assert_notice_eq(&notice_sent, &notice_read); - } -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_inserts_reports() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: u64 = 3; - let mut reports = vec![]; - for i in 0..N { - state.produce_input_in_broker(i).await; - for j in 0..N { - let report = state.produce_report_in_broker(i, j).await; - reports.push(report); - } - } - - for report_sent in reports.into_iter() { - let report_read = state.get_report_from_database(&report_sent).await; - assert_report_eq(&report_sent, &report_read); - } -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_inserts_proofs() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - const N: u64 = 3; - let mut proofs = vec![]; - for i in 0..N { - state.produce_input_in_broker(i).await; - for j in 0..N { - state.produce_voucher_in_broker(i, j).await; - state.produce_notice_in_broker(i, j).await; - proofs.push( - state - .produce_proof_in_broker(i, j, RollupsOutputEnum::Voucher) - .await, - ); - proofs.push( - state - .produce_proof_in_broker(i, j, RollupsOutputEnum::Notice) - .await, - ); - } - } - - for proof_sent in proofs.into_iter() { - let proof_read = state.get_proof_from_database(&proof_sent).await; - assert_proof_eq(&proof_sent, &proof_read); - } -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_ignores_finish_epoch_and_insert_input_after() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - tracing::info!("producing finish epoch"); - let data = RollupsData::FinishEpoch {}; - state.broker.produce_input_event(data).await; - - let input_sent = state.produce_input_in_broker(0).await; - let input_read = state.get_input_from_database(&input_sent).await; - assert_input_eq(&input_sent, &input_read); -} - -#[test_log::test(tokio::test)] -#[serial] -async fn 
indexer_does_not_override_existing_input() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - let original_input = state.produce_input_in_broker(0).await; - let _second_input = state.produce_input_in_broker(0).await; - let input_read = state.get_input_from_database(&original_input).await; - assert_input_eq(&original_input, &input_read); -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_inserts_input_after_broker_timeout() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - tracing::info!("sleeping so the broker consume times out in indexer"); - tokio::time::sleep(std::time::Duration::from_millis( - 2 * BROKER_CONSUME_TIMEOUT as u64, - )) - .await; - - let input_sent = state.produce_input_in_broker(0).await; - let input_read = state.get_input_from_database(&input_sent).await; - assert_input_eq(&input_sent, &input_read); -} - -#[test_log::test(tokio::test)] -#[serial] -async fn indexer_fails_to_insert_output_when_input_does_not_exist() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - - state.produce_voucher_in_broker(0, 0).await; - let error = state.get_indexer_error().await; - assert!(matches!(error, IndexerError::RepositoryError { .. })); -} - -impl TestState<'_> { - async fn setup(docker: &Cli) -> TestState<'_> { - let broker = BrokerFixture::setup(docker).await; - let repository = RepositoryFixture::setup(docker); - let indexer = spawn_indexer( - repository.config(), - broker.redis_endpoint().to_owned(), - broker.dapp_metadata(), - ) - .await; - TestState { - broker, - repository, - indexer, - } - } - - /// Wait for the indexer to fail and return the error - async fn get_indexer_error(self) -> IndexerError { - tracing::info!("waiting for indexer to fail"); - self.indexer - .await - .expect("failed to wait for indexer") - .expect_err("indexer should exit with error") - } - - async fn produce_input_in_broker( - &self, - input_index: u64, - ) -> RollupsAdvanceStateInput { - let timestamp = std::time::SystemTime::now() - .duration_since(std::time::UNIX_EPOCH) - .unwrap() - .as_millis() as u64; - self.produce_input_in_broker_with_timestamp(input_index, timestamp) - .await - } - - async fn produce_input_in_broker_with_timestamp( - &self, - input_index: u64, - timestamp: u64, - ) -> RollupsAdvanceStateInput { - let metadata = InputMetadata { - epoch_index: 0, - input_index, - block_number: rand::thread_rng().gen(), - msg_sender: random_array().into(), - timestamp, - }; - let input = RollupsAdvanceStateInput { - metadata, - payload: random_array::<32>().to_vec().into(), - tx_hash: random_array().into(), - }; - let data = RollupsData::AdvanceStateInput(input.clone()); - - tracing::info!(?input, "producing input"); - self.broker.produce_input_event(data).await; - input - } - - async fn get_input_from_database( - &self, - input_sent: &RollupsAdvanceStateInput, - ) -> Input { - tracing::info!("waiting for input in database"); - let index = input_sent.metadata.input_index as i32; - self.repository.retry(move |r| r.get_input(index)).await - } - - async fn produce_voucher_in_broker( - &self, - input_index: u64, - index: u64, - ) -> RollupsVoucher { - let voucher = RollupsVoucher { - index, - input_index, - destination: random_array().into(), - payload: random_array::<32>().to_vec().into(), - }; - let output = RollupsOutput::Voucher(voucher.clone()); - - tracing::info!(?voucher, "producing voucher"); - self.broker.produce_output(output).await; - voucher - } - - async fn 
get_voucher_from_database( - &self, - voucher_sent: &RollupsVoucher, - ) -> Voucher { - tracing::info!("waiting for voucher in database"); - let input_index = voucher_sent.input_index as i32; - let index = voucher_sent.index as i32; - self.repository - .retry(move |r| r.get_voucher(index, input_index)) - .await - } - - async fn produce_notice_in_broker( - &self, - input_index: u64, - index: u64, - ) -> RollupsNotice { - let notice = RollupsNotice { - index, - input_index, - payload: random_array::<32>().to_vec().into(), - }; - let output = RollupsOutput::Notice(notice.clone()); - - tracing::info!(?notice, "producing notice"); - self.broker.produce_output(output).await; - notice - } - - async fn get_notice_from_database( - &self, - notice_sent: &RollupsNotice, - ) -> Notice { - tracing::info!("waiting for notice in database"); - let input_index = notice_sent.input_index as i32; - let index = notice_sent.index as i32; - self.repository - .retry(move |r| r.get_notice(index, input_index)) - .await - } - - async fn produce_report_in_broker( - &self, - input_index: u64, - index: u64, - ) -> RollupsReport { - let report = RollupsReport { - index, - input_index, - payload: random_array::<32>().to_vec().into(), - }; - let output = RollupsOutput::Report(report.clone()); - - tracing::info!(?report, "producing report"); - self.broker.produce_output(output).await; - report - } - - async fn get_report_from_database( - &self, - report_sent: &RollupsReport, - ) -> Report { - tracing::info!("waiting for report in database"); - let input_index = report_sent.input_index as i32; - let index = report_sent.index as i32; - self.repository - ..retry(move |r| r.get_report(index, input_index)) - .await - } - - async fn produce_proof_in_broker( - &self, - input_index: u64, - output_index: u64, - output_enum: RollupsOutputEnum, - ) -> RollupsProof { - let validity = RollupsOutputValidityProof { - input_index_within_epoch: input_index, - output_index_within_input: output_index, - output_hashes_root_hash: random_array().into(), - vouchers_epoch_root_hash: random_array().into(), - notices_epoch_root_hash: random_array().into(), - machine_state_hash: random_array().into(), - output_hash_in_output_hashes_siblings: vec![random_array().into()], - output_hashes_in_epoch_siblings: vec![random_array().into()], - }; - let proof = RollupsProof { - input_index, - output_index, - output_enum, - validity, - context: random_array::<32>().to_vec().into(), - }; - let output = RollupsOutput::Proof(proof.clone()); - - tracing::info!(?proof, "producing proof"); - self.broker.produce_output(output).await; - proof - } - - async fn get_proof_from_database( - &self, - proof_sent: &RollupsProof, - ) -> Proof { - tracing::info!("waiting for proof in database"); - let input_index = proof_sent.input_index as i32; - let output_index = proof_sent.output_index as i32; - let output_enum = match proof_sent.output_enum { - RollupsOutputEnum::Voucher => OutputEnum::Voucher, - RollupsOutputEnum::Notice => OutputEnum::Notice, - }; - self.repository - .retry(move |r| { - match r.get_proof(input_index, output_index, output_enum) { - Ok(option_proof) => { - // The retry only works properly if the query - // returns item not found - option_proof.ok_or(rollups_data::Error::ItemNotFound { - item_type: "proof".to_owned(), - }) - } - Err(e) => Err(e), - } - }) - .await - } -} - -async fn spawn_indexer( - repository_config: RepositoryConfig, - redis_endpoint: BrokerEndpoint, - dapp_metadata: DAppMetadata, -) -> JoinHandle<Result<(), IndexerError>> { - let broker_config = BrokerConfig { - 
redis_endpoint, - consume_timeout: BROKER_CONSUME_TIMEOUT, - backoff: Default::default(), - }; - - let indexer_config = indexer::IndexerConfig { - repository_config, - dapp_metadata, - broker_config, - healthcheck_port: 0, - log_config: LogConfig::default(), - }; - tokio::spawn(async move { - indexer::run(indexer_config).await.map_err(|e| { - tracing::error!("{:?}", e); - e - }) - }) -} - -fn random_array<const N: usize>() -> [u8; N] { - let mut arr = [0; N]; - for i in 0..N { - arr[i] = rand::thread_rng().gen(); - } - arr -} - -fn assert_input_eq(input_sent: &RollupsAdvanceStateInput, input_read: &Input) { - assert_eq!(input_read.index as u64, input_sent.metadata.input_index); - assert_eq!( - &input_read.msg_sender, - input_sent.metadata.msg_sender.inner() - ); - assert_eq!(&input_read.tx_hash, input_sent.tx_hash.inner()); - assert_eq!( - input_read.block_number as u64, - input_sent.metadata.block_number - ); - assert_eq!( - input_read - .timestamp - .duration_since(UNIX_EPOCH) - .expect("failed to get time") - .as_secs(), - input_sent.metadata.timestamp - ); - assert_eq!(&input_read.payload, input_sent.payload.inner()); -} - -fn assert_voucher_eq(voucher_sent: &RollupsVoucher, voucher_read: &Voucher) { - assert_eq!(voucher_read.index as u64, voucher_sent.index); - assert_eq!(voucher_read.input_index as u64, voucher_sent.input_index); - assert_eq!(&voucher_read.destination, voucher_sent.destination.inner()); - assert_eq!(&voucher_read.payload, voucher_sent.payload.inner()); -} - -fn assert_notice_eq(notice_sent: &RollupsNotice, notice_read: &Notice) { - assert_eq!(notice_read.index as u64, notice_sent.index); - assert_eq!(notice_read.input_index as u64, notice_sent.input_index); - assert_eq!(&notice_read.payload, notice_sent.payload.inner()); -} - -fn assert_report_eq(report_sent: &RollupsReport, report_read: &Report) { - assert_eq!(report_read.index as u64, report_sent.index); - assert_eq!(report_read.input_index as u64, report_sent.input_index); - assert_eq!(&report_read.payload, report_sent.payload.inner()); -} - -fn assert_proof_eq(proof_sent: &RollupsProof, proof_read: &Proof) { - let output_enum = match proof_sent.output_enum { - RollupsOutputEnum::Voucher => OutputEnum::Voucher, - RollupsOutputEnum::Notice => OutputEnum::Notice, - }; - assert_eq!(proof_read.input_index as u64, proof_sent.input_index); - assert_eq!(proof_read.output_index as u64, proof_sent.output_index); - assert_eq!(proof_read.output_enum, output_enum); - assert_eq!( - proof_read.validity_input_index_within_epoch as u64, - proof_sent.validity.input_index_within_epoch - ); - assert_eq!( - proof_read.validity_output_index_within_input as u64, - proof_sent.validity.output_index_within_input - ); - assert_eq!( - &proof_read.validity_output_hashes_root_hash, - proof_sent.validity.output_hashes_root_hash.inner() - ); - assert_eq!( - &proof_read.validity_vouchers_epoch_root_hash, - proof_sent.validity.vouchers_epoch_root_hash.inner() - ); - assert_eq!( - &proof_read.validity_notices_epoch_root_hash, - proof_sent.validity.notices_epoch_root_hash.inner() - ); - assert_eq!( - &proof_read.validity_machine_state_hash, - proof_sent.validity.machine_state_hash.inner() - ); - for (siblings_read, siblings_sent) in proof_read - .validity_output_hash_in_output_hashes_siblings - .iter() - .zip(&proof_sent.validity.output_hash_in_output_hashes_siblings) - { - assert_eq!(siblings_read.as_ref().unwrap(), siblings_sent.inner()); - } - for (siblings_read, siblings_sent) in proof_read - .validity_output_hashes_in_epoch_siblings - .iter() - 
.zip(&proof_sent.validity.output_hashes_in_epoch_siblings) - { - assert_eq!(siblings_read.as_ref().unwrap(), siblings_sent.inner()); - } - assert_eq!(&proof_read.context, proof_sent.context.inner()); -} diff --git a/offchain/inspect-server/Cargo.toml b/offchain/inspect-server/Cargo.toml deleted file mode 100644 index 02039e551..000000000 --- a/offchain/inspect-server/Cargo.toml +++ /dev/null @@ -1,32 +0,0 @@ -[package] -name = "inspect-server" -edition.workspace = true -license.workspace = true -version.workspace = true - -[[bin]] -name = "cartesi-rollups-inspect-server" -path = "src/main.rs" - -[dependencies] -grpc-interfaces = { path = "../grpc-interfaces" } -http-health-check = { path = "../http-health-check" } -log = { path = "../log" } - -actix-cors.workspace = true -actix-web.workspace = true -clap = { workspace = true, features = ["derive", "env"] } -hex.workspace = true -serde = { workspace = true, features = ["rc", "derive"] } -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } -toml.workspace = true -tonic.workspace = true -tracing.workspace = true -tracing-actix-web.workspace = true -uuid = { workspace = true, features = ["v4"] } - -[dev-dependencies] -futures.workspace = true -reqwest = { workspace = true, features = ["json"] } -serial_test.workspace = true diff --git a/offchain/inspect-server/README.md b/offchain/inspect-server/README.md deleted file mode 100644 index 436f6fdec..000000000 --- a/offchain/inspect-server/README.md +++ /dev/null @@ -1,21 +0,0 @@ -# Inspect Server - -This server receives HTTP inspect-state requests and sends them to the server-manager. -The specification of the HTTP inspect API can be found in the [openapi-interfaces](https://github.com/cartesi/openapi-interfaces/) repository. - -## Running - -To run the inspect-server locally you need to set up an instance of the server-manager. -This can be done by setting up an example in the [rollups-examples](https://github.com/cartesi/rollups-examples) repository. - -1. Assuming you are running the server-manager on local port 5001, you could run the inspect server on port 5002 as follows: - -```shell -cargo run -- --inspect-server-address localhost:5002 --server-manager-address localhost:5001 --session-id default_rollups_id -``` - -2. Then, you could submit an inspect request with payload "mypayload" by sending a GET HTTP request as follows: - -```shell -curl http://localhost:5002/inspect/mypayload -``` diff --git a/offchain/inspect-server/src/config.rs b/offchain/inspect-server/src/config.rs deleted file mode 100644 index 5bd1124e8..000000000 --- a/offchain/inspect-server/src/config.rs +++ /dev/null @@ -1,132 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -/// Configuration can be provided using command-line options, environment variables, or a -/// configuration file. -/// Command-line parameters take precedence over environment variables, and environment variables -/// take precedence over the same parameter from the configuration file. 
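To make the precedence rule above concrete: clap itself resolves command line over environment variables, and the `From<CLIConfig>` impl below layers the file fallback on top with `Option::or`. A minimal sketch with hypothetical names:

    fn resolve_option(cli_or_env: Option<String>, file: Option<String>) -> Option<String> {
        // clap has already folded CLI-over-env into `cli_or_env`;
        // the configuration file only fills values that are still missing.
        cli_or_env.or(file)
    }
    // resolve_option(Some("cli".into()), Some("file".into())) == Some("cli".into())
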
-use clap::Parser; -use log::{LogConfig, LogEnvCliConfig}; -use serde::Deserialize; -use snafu::{ResultExt, Snafu}; - -#[derive(Debug, Snafu)] -pub enum ConfigError { - #[snafu(display("read configuration file error"))] - FileError { source: std::io::Error }, - - #[snafu(display("parse configuration file error"))] - ParseError { source: toml::de::Error }, - - #[snafu(whatever, display("{message}"))] - Whatever { - message: String, - #[snafu(source(from(Box<dyn std::error::Error>, Some)))] - source: Option<Box<dyn std::error::Error>>, - }, -} -#[derive(Debug)] -pub struct InspectServerConfig { - pub log_config: LogConfig, - pub inspect_server_address: String, - pub server_manager_address: String, - pub session_id: String, - pub queue_size: usize, - pub healthcheck_port: u16, -} - -#[derive(Parser)] -#[command(name = "inspect_server_config")] -#[command(about = "Configuration for inspect-server")] -pub struct CLIConfig { - #[command(flatten)] - pub log_config: LogEnvCliConfig, - - /// HTTP address for the inspect server - #[arg(long, env)] - inspect_server_address: Option<String>, - - /// Server manager gRPC address - #[arg(long, env)] - server_manager_address: Option<String>, - - /// Server manager session id - #[arg(long, env)] - session_id: Option<String>, - - /// Queue size for concurrent inspect requests - #[arg(long, env)] - queue_size: Option<usize>, - - /// Path to the config file - #[arg(long, env)] - pub config_path: Option<String>, - - /// Port of health check - #[arg( - long, - env = "INSPECT_SERVER_HEALTHCHECK_PORT", - default_value_t = 8080 - )] - pub healthcheck_port: u16, -} - -impl From<CLIConfig> for InspectServerConfig { - fn from(cli_config: CLIConfig) -> Self { - let file_config: FileConfig = load_config_file(cli_config.config_path) - .expect("couldn't read config file"); - - let inspect_server_address: String = cli_config - .inspect_server_address - .or(file_config.inspect_server_address) - .expect("couldn't retrieve inspect server address"); - - let server_manager_address: String = cli_config - .server_manager_address - .or(file_config.server_manager_address) - .expect("couldn't retrieve server manager address"); - - let session_id: String = cli_config - .session_id - .or(file_config.session_id) - .expect("couldn't retrieve session id"); - - let queue_size: usize = cli_config - .queue_size - .or(file_config.queue_size) - .unwrap_or(100); - - Self { - log_config: cli_config.log_config.into(), - inspect_server_address, - server_manager_address, - session_id, - queue_size, - healthcheck_port: cli_config.healthcheck_port, - } - } -} - -#[derive(Clone, Debug, Deserialize, Default)] -struct FileConfig { - inspect_server_address: Option<String>, - server_manager_address: Option<String>, - session_id: Option<String>, - queue_size: Option<usize>, -} - -fn load_config_file<T: Default + serde::de::DeserializeOwned>( - // path to the config file if provided - config_file: Option<String>, -) -> Result<T, ConfigError> { - match config_file { - Some(config) => { - let s = std::fs::read_to_string(config).context(FileSnafu)?; - - let file_config: T = toml::from_str(&s).context(ParseSnafu)?; - - Ok(file_config) - } - None => Ok(T::default()), - } -} diff --git a/offchain/inspect-server/src/error.rs b/offchain/inspect-server/src/error.rs deleted file mode 100644 index 4155edcf5..000000000 --- a/offchain/inspect-server/src/error.rs +++ /dev/null @@ -1,22 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use snafu::Snafu; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -pub enum InspectError { - #[snafu(display("health check error"))] - HealthCheckError { - source: http_health_check::HealthCheckError,
- }, - - #[snafu(display("server error"))] - ServerError { source: std::io::Error }, - - #[snafu(display("Failed to connect to server manager: {}", message))] - FailedToConnect { message: String }, - - #[snafu(display("Failed to inspect state: {}", message))] - InspectFailed { message: String }, -} diff --git a/offchain/inspect-server/src/inspect.rs b/offchain/inspect-server/src/inspect.rs deleted file mode 100644 index de5665cae..000000000 --- a/offchain/inspect-server/src/inspect.rs +++ /dev/null @@ -1,115 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use tokio::sync::{mpsc, oneshot}; -use tonic::Request; -use uuid::Uuid; - -use crate::config::InspectServerConfig; -use crate::error::InspectError; - -use grpc_interfaces::cartesi_server_manager::{ - server_manager_client::ServerManagerClient, InspectStateRequest, -}; -pub use grpc_interfaces::cartesi_server_manager::{ - CompletionStatus, InspectStateResponse, Report, -}; - -#[derive(Clone)] -pub struct InspectClient { - inspect_tx: mpsc::Sender<InspectRequest>, -} - -/// The inspect client is a wrapper that just sends the inspect requests to another thread and -/// waits for the result. The actual request to the server manager is done by the handle_inspect -/// function. -impl InspectClient { - pub fn new(config: &InspectServerConfig) -> Self { - let (inspect_tx, inspect_rx) = mpsc::channel(config.queue_size); - let address = config.server_manager_address.clone(); - let session_id = config.session_id.clone(); - tokio::spawn(handle_inspect(address, session_id, inspect_rx)); - Self { inspect_tx } - } - - pub async fn inspect( - &self, - payload: Vec<u8>, - ) -> Result<InspectStateResponse, InspectError> { - let (response_tx, response_rx) = oneshot::channel(); - let request = InspectRequest { - payload, - response_tx, - }; - if let Err(e) = self.inspect_tx.try_send(request) { - return Err(InspectError::InspectFailed { - message: e.to_string(), - }); - } else { - tracing::debug!("inspect request added to the queue"); - } - response_rx.await.expect("handle_inspect never fails") - } -} - -struct InspectRequest { - payload: Vec<u8>, - response_tx: oneshot::Sender<Result<InspectStateResponse, InspectError>>, -} - -fn respond( - response_tx: oneshot::Sender<Result<InspectStateResponse, InspectError>>, - response: Result<InspectStateResponse, InspectError>, -) { - if response_tx.send(response).is_err() { - tracing::warn!("failed to respond inspect request (client dropped)"); - } -}
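The wrapper pattern described in the comment above (queue requests through a bounded `mpsc` channel to a single worker task, and reply to each caller over a `oneshot` channel) can be sketched in isolation as follows. `EchoRequest` and the echoing worker are hypothetical stand-ins for `InspectRequest` and `handle_inspect`, and the sketch assumes a `tokio` dependency with the `rt-multi-thread`, `macros`, and `sync` features:

```rust
use tokio::sync::{mpsc, oneshot};

// Each queued request carries a oneshot sender so the worker can reply
// directly to the caller that enqueued it.
struct EchoRequest {
    payload: Vec<u8>,
    response_tx: oneshot::Sender<Vec<u8>>,
}

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<EchoRequest>(8); // bounded queue
    // Worker task: the single owner of the backend connection.
    tokio::spawn(async move {
        while let Some(req) = rx.recv().await {
            // A real worker would call the server manager here; this one echoes.
            let _ = req.response_tx.send(req.payload);
        }
    });
    // Caller side: try_send fails fast when the queue is full instead of
    // blocking, which is how the server reports "no available capacity".
    let (response_tx, response_rx) = oneshot::channel();
    tx.try_send(EchoRequest {
        payload: b"hi".to_vec(),
        response_tx,
    })
    .expect("queue full");
    assert_eq!(response_rx.await.unwrap(), b"hi".to_vec());
}
```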
- -/// Loop that answers requests coming from inspect_rx. -async fn handle_inspect( - address: String, - session_id: String, - mut inspect_rx: mpsc::Receiver<InspectRequest>, -) { - let endpoint = format!("http://{}", address); - while let Some(request) = inspect_rx.recv().await { - match ServerManagerClient::connect(endpoint.clone()).await { - Err(e) => { - respond( - request.response_tx, - Err(InspectError::FailedToConnect { - message: e.to_string(), - }), - ); - } - Ok(mut client) => { - let request_id = Uuid::new_v4().to_string(); - let grpc_request = InspectStateRequest { - session_id: session_id.clone(), - query_payload: request.payload, - }; - - tracing::debug!( - "calling grpc inspect_state request={:?} request_id={}", - grpc_request, - request_id - ); - let mut grpc_request = Request::new(grpc_request); - grpc_request - .metadata_mut() - .insert("request-id", request_id.parse().unwrap()); - let grpc_response = client.inspect_state(grpc_request).await; - - tracing::debug!("got grpc response from inspect_state response={:?} request_id={}", grpc_response, request_id); - - let response = grpc_response - .map(|result| result.into_inner()) - .map_err(|e| InspectError::InspectFailed { - message: e.message().to_string(), - }); - respond(request.response_tx, response); - } - } - } -} diff --git a/offchain/inspect-server/src/lib.rs b/offchain/inspect-server/src/lib.rs deleted file mode 100644 index ab3510bd0..000000000 --- a/offchain/inspect-server/src/lib.rs +++ /dev/null @@ -1,29 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use error::InspectError; -use snafu::ResultExt; - -pub use config::InspectServerConfig; -pub use inspect::InspectClient; - -pub mod config; -mod error; -pub mod inspect; -pub mod server; - -#[tracing::instrument(level = "trace", skip_all)] -pub async fn run(config: InspectServerConfig) -> Result<(), InspectError> { - let health_handle = http_health_check::start(config.healthcheck_port); - let inspect_client = InspectClient::new(&config); - let inspect_server = - server::create(&config, inspect_client).context(error::ServerSnafu)?; - tokio::select!
{ - ret = health_handle => { - ret.context(error::HealthCheckSnafu) - } - ret = inspect_server => { - ret.context(error::ServerSnafu) - } - } -} diff --git a/offchain/inspect-server/src/main.rs b/offchain/inspect-server/src/main.rs deleted file mode 100644 index 07cff6f75..000000000 --- a/offchain/inspect-server/src/main.rs +++ /dev/null @@ -1,17 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; - -use inspect_server::{config::CLIConfig, InspectServerConfig}; - -#[tokio::main] -async fn main() -> Result<(), Box<dyn std::error::Error>> { - let config: InspectServerConfig = CLIConfig::parse().into(); - - log::configure(&config.log_config); - - log::log_service_start(&config, "Inspect Server"); - - inspect_server::run(config).await.map_err(|e| e.into()) -} diff --git a/offchain/inspect-server/src/server.rs b/offchain/inspect-server/src/server.rs deleted file mode 100644 index b02b03831..000000000 --- a/offchain/inspect-server/src/server.rs +++ /dev/null @@ -1,143 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use actix_cors::Cors; -use actix_web::{ - dev::Server, error, web, App, HttpRequest, HttpResponse, HttpServer, - Responder, -}; -use serde::{Deserialize, Serialize}; -use tracing_actix_web::TracingLogger; - -use crate::config::InspectServerConfig; -use crate::error::InspectError; -use crate::inspect::{ - CompletionStatus, InspectClient, InspectStateResponse, Report, -}; - -// 2^20 bytes, which is the length of the RX buffer -pub const CARTESI_MACHINE_RX_BUFFER_LIMIT: usize = 1_048_576; - -pub fn create( - config: &InspectServerConfig, - inspect_client: InspectClient, -) -> std::io::Result<Server> { - let server = HttpServer::new(move || { - let cors = Cors::permissive(); - App::new() - .app_data(web::Data::new(inspect_client.clone())) - .app_data(web::PayloadConfig::new(CARTESI_MACHINE_RX_BUFFER_LIMIT)) - .wrap(TracingLogger::default()) - .wrap(cors) - .service(inspect_get) - .service(inspect_post) - }) - .bind(config.inspect_server_address.clone())? - .run(); - Ok(server) -} - -#[actix_web::get("/inspect/{payload:.*}")] -async fn inspect_get( - request: HttpRequest, - payload: web::Path<String>, - inspect_client: web::Data<InspectClient>, -) -> actix_web::error::Result<impl Responder> { - let mut payload = payload.into_inner(); - if let Some(query) = request.uri().query() { - payload = payload + "?"
+ query; - } - let payload = payload.as_bytes().to_vec(); - let response = inspect_client.inspect(payload).await?; - let http_response = HttpInspectResponse::from(response); - Ok(HttpResponse::Ok().json(http_response)) -} - -#[actix_web::post("/inspect")] -async fn inspect_post( - payload: web::Bytes, - inspect_client: web::Data<InspectClient>, -) -> actix_web::error::Result<impl Responder> { - let response = inspect_client.inspect(payload.to_vec()).await?; - let http_response = HttpInspectResponse::from(response); - Ok(HttpResponse::Ok().json(http_response)) -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct HttpInspectResponse { - pub status: String, - pub exception_payload: Option<String>, - pub reports: Vec<HttpReport>, - pub processed_input_count: u64, -} - -impl From<InspectStateResponse> for HttpInspectResponse { - fn from(response: InspectStateResponse) -> HttpInspectResponse { - let reports = - response.reports.into_iter().map(HttpReport::from).collect(); - HttpInspectResponse { - status: convert_status(response.status), - exception_payload: response.exception_data.map(hex_encode), - reports, - processed_input_count: response.processed_input_count, - } - } -} - -fn convert_status(status: i32) -> String { - // Unfortunately, the gRPC interface uses i32 instead of an enum type, - // so it is clearer to use if-else instead of match. - if status == CompletionStatus::Accepted as i32 { - String::from("Accepted") - } else if status == CompletionStatus::Rejected as i32 { - String::from("Rejected") - } else if status == CompletionStatus::Exception as i32 { - String::from("Exception") - } else if status == CompletionStatus::MachineHalted as i32 { - String::from("MachineHalted") - } else if status == CompletionStatus::CycleLimitExceeded as i32 { - String::from("CycleLimitExceeded") - } else if status == CompletionStatus::TimeLimitExceeded as i32 { - String::from("TimeLimitExceeded") - } else if status == CompletionStatus::PayloadLengthLimitExceeded as i32 { - String::from("PayloadLengthLimitExceeded") - } else { - tracing::error!( - "Invalid status received from server-manager: {}", - status - ); - String::from("Unknown") - } -} - -#[derive(Debug, Deserialize, Serialize)] -pub struct HttpReport { - pub payload: String, -} - -impl From<Report> for HttpReport { - fn from(report: Report) -> HttpReport { - HttpReport { - payload: hex_encode(report.payload), - } - } -} - -fn hex_encode(payload: Vec<u8>) -> String { - String::from("0x") + &hex::encode(payload) -} - -impl From<InspectError> for error::Error { - fn from(e: InspectError) -> error::Error { - tracing::warn!("{}", e.to_string()); - match e { - InspectError::FailedToConnect { .. } => { - error::ErrorBadGateway(e.to_string()) - } - InspectError::InspectFailed { ..
} => { - error::ErrorBadRequest(e.to_string()) - } - _ => error::ErrorBadGateway(e.to_string()), - } - } -} diff --git a/offchain/inspect-server/tests/common/mod.rs b/offchain/inspect-server/tests/common/mod.rs deleted file mode 100644 index 175577afd..000000000 --- a/offchain/inspect-server/tests/common/mod.rs +++ /dev/null @@ -1,311 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -#![allow(dead_code)] - -use actix_web::dev::ServerHandle; -use inspect_server::config::InspectServerConfig; -use log::LogConfig; -pub use reqwest::StatusCode; -use std::sync::Arc; -use tokio::sync::{oneshot, Notify}; -use tokio::task::JoinHandle; -use tonic::{transport::Server, Request, Response, Status}; - -use grpc_interfaces::cartesi_machine::Void; -use grpc_interfaces::cartesi_server_manager::{ - server_manager_server::{ServerManager, ServerManagerServer}, - AdvanceStateRequest, DeleteEpochRequest, EndSessionRequest, - FinishEpochRequest, FinishEpochResponse, GetEpochStatusRequest, - GetEpochStatusResponse, GetSessionStatusRequest, GetSessionStatusResponse, - GetStatusResponse, InspectStateRequest, InspectStateResponse, - StartSessionRequest, StartSessionResponse, -}; -pub use grpc_interfaces::cartesi_server_manager::{CompletionStatus, Report}; -use grpc_interfaces::versioning::GetVersionResponse; - -use inspect_server::inspect::InspectClient; -use inspect_server::server::HttpInspectResponse; - -pub const SERVER_MANAGER_ADDRESS: &'static str = "127.0.0.1:50001"; -pub const INSPECT_SERVER_ADDRESS: &'static str = "127.0.0.1:50002"; -pub const SESSION_ID: &'static str = "default session"; -pub const ACTIVE_EPOCH_INDEX: u64 = 123; -pub const PROCESSED_INPUT_COUNT: u64 = 456; -pub const QUEUE_SIZE: usize = 3; - -pub struct TestState { - server_manager: MockServerManagerWrapper, - inspect_server: InspectServerWrapper, -} - -impl TestState { - /// Start the inspect-server and the mock-server-manager - pub async fn setup(mock: impl MockInspect) -> Self { - let server_manager = MockServerManagerWrapper::start(mock).await; - let inspect_server = InspectServerWrapper::start().await; - Self { - server_manager, - inspect_server, - } - } - - /// Shutdown both servers. - /// This function cannot be implemented as the Drop trait because it is async. - pub async fn teardown(self) { - self.inspect_server.stop().await; - self.server_manager.stop().await; - } -} - -#[derive(Clone, Debug, Default)] -pub struct MockInspectResponse { - pub reports: Vec<Report>, - pub exception: Option<Vec<u8>>, - pub completion_status: CompletionStatus, -} - -#[tonic::async_trait] -pub trait MockInspect: Send + Sync + 'static { - async fn inspect_state(&self, payload: Vec<u8>) -> MockInspectResponse; -} - -pub struct InspectServerWrapper { - server_handle: ServerHandle, - join_handle: JoinHandle<()>, -} - -impl InspectServerWrapper { - /// Start the inspect server in another thread. - /// This function blocks until the server is ready.
- pub async fn start() -> Self { - let inspect_server_config = InspectServerConfig { - inspect_server_address: INSPECT_SERVER_ADDRESS.to_string(), - server_manager_address: SERVER_MANAGER_ADDRESS.to_string(), - session_id: SESSION_ID.to_string(), - queue_size: QUEUE_SIZE, - healthcheck_port: 0, - log_config: LogConfig::default(), - }; - - let inspect_client = InspectClient::new(&inspect_server_config); - let (handle_tx, handle_rx) = oneshot::channel(); - let join_handle = tokio::spawn(async move { - let server = inspect_server::server::create( - &inspect_server_config, - inspect_client, - ) - .expect("failed to start inspect server"); - handle_tx - .send(server.handle()) - .expect("failed to send server handle"); - server.await.expect("inspect server execution failed"); - }); - let server_handle = - handle_rx.await.expect("failed to receive server handle"); - Self { - server_handle, - join_handle, - } - } - - /// Stop the inspect server. - /// This function blocks until the server is shut down. - pub async fn stop(self) { - self.server_handle.stop(true).await; - self.join_handle - .await - .expect("failed to stop inspect server"); - } -} - -pub struct MockServerManagerWrapper { - shutdown: Arc<Notify>, - join_handle: JoinHandle<()>, -} - -impl MockServerManagerWrapper { - /// Start the server manager in another thread. - /// This function blocks until the server is ready. - pub async fn start(mock: impl MockInspect) -> Self { - let service = MockServerManager { mock }; - let address = SERVER_MANAGER_ADDRESS.parse().expect("invalid address"); - let ready = Arc::new(Notify::new()); - let shutdown = Arc::new(Notify::new()); - let join_handle = { - let ready = ready.clone(); - let shutdown = shutdown.clone(); - tokio::spawn(async move { - let server = Server::builder() - .add_service(ServerManagerServer::new(service)) - .serve_with_shutdown(address, shutdown.notified()); - ready.notify_one(); - server.await.expect("failed to start server manager"); - }) - }; - ready.notified().await; - Self { - shutdown, - join_handle, - } - } - - /// Stop the server manager. - /// This function blocks until the server is shut down.
- pub async fn stop(self) { - self.shutdown.notify_one(); - self.join_handle - .await - .expect("failed to shutdown server manager"); - } -} - -struct MockServerManager<T: MockInspect> { - mock: T, -} - -#[tonic::async_trait] -impl<T: MockInspect> ServerManager for MockServerManager<T> { - async fn inspect_state( - &self, - request: Request<InspectStateRequest>, - ) -> Result<Response<InspectStateResponse>, Status> { - let mock_response = self - .mock - .inspect_state(request.into_inner().query_payload) - .await; - let response = InspectStateResponse { - session_id: SESSION_ID.to_string(), - active_epoch_index: ACTIVE_EPOCH_INDEX, - processed_input_count: PROCESSED_INPUT_COUNT, - exception_data: mock_response.exception, - status: mock_response.completion_status as i32, - reports: mock_response.reports, - }; - Ok(Response::new(response)) - } - - async fn get_version( - &self, - _: Request<Void>, - ) -> Result<Response<GetVersionResponse>, Status> { - unimplemented!() - } - - async fn start_session( - &self, - _: Request<StartSessionRequest>, - ) -> Result<Response<StartSessionResponse>, Status> { - unimplemented!() - } - - async fn end_session( - &self, - _: Request<EndSessionRequest>, - ) -> Result<Response<Void>, Status> { - unimplemented!() - } - - async fn advance_state( - &self, - _: Request<AdvanceStateRequest>, - ) -> Result<Response<Void>, Status> { - unimplemented!() - } - - async fn finish_epoch( - &self, - _: Request<FinishEpochRequest>, - ) -> Result<Response<FinishEpochResponse>, Status> { - unimplemented!() - } - - async fn get_status( - &self, - _: Request<Void>, - ) -> Result<Response<GetStatusResponse>, Status> { - unimplemented!() - } - - async fn get_session_status( - &self, - _: Request<GetSessionStatusRequest>, - ) -> Result<Response<GetSessionStatusResponse>, Status> { - unimplemented!() - } - - async fn get_epoch_status( - &self, - _: Request<GetEpochStatusRequest>, - ) -> Result<Response<GetEpochStatusResponse>, Status> { - unimplemented!() - } - - async fn delete_epoch( - &self, - _: Request<DeleteEpochRequest>, - ) -> Result<Response<Void>, Status> { - unimplemented!() - } -} - -/// Send an inspect-state request to the inspect server via GET. -/// If the status code is 200, return the HttpInspectResponse. -/// Else, return the status code and the error message. -pub async fn send_get_request( - payload: &str, -) -> Result<HttpInspectResponse, (StatusCode, String)> { - let url = format!("http://{}/inspect/{}", INSPECT_SERVER_ADDRESS, payload); - let response = reqwest::get(url) - .await - .expect("failed to send inspect via GET"); - let status = response.status(); - if status == 200 { - let response = response - .json::<HttpInspectResponse>() - .await - .expect("failed to decode json response"); - Ok(response) - } else { - let message = response - .text() - .await - .expect("failed to obtain response body"); - Err((status, message)) - } -} - -/// Send an inspect-state request to the inspect server via POST. -/// If the status code is 200, return the HttpInspectResponse. -/// Else, return the status code and the error message.
-pub async fn send_post_request( - payload: &str, -) -> Result<HttpInspectResponse, (StatusCode, String)> { - let url = format!("http://{}/inspect", INSPECT_SERVER_ADDRESS); - let client = reqwest::Client::new(); - let response = client - .post(url) - .body(String::from(payload)) - .send() - .await - .expect("failed to send inspect via POST"); - let status = response.status(); - if status == 200 { - let response = response - .json::<HttpInspectResponse>() - .await - .expect("failed to decode json response"); - Ok(response) - } else { - let message = response - .text() - .await - .expect("failed to obtain response body"); - Err((status, message)) - } -} - -/// Convert a binary value to the hex format -pub fn hex_to_bin(payload: &Vec<u8>) -> String { - String::from("0x") + &hex::encode(payload) -} diff --git a/offchain/inspect-server/tests/payload.rs b/offchain/inspect-server/tests/payload.rs deleted file mode 100644 index 19baeff56..000000000 --- a/offchain/inspect-server/tests/payload.rs +++ /dev/null @@ -1,135 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod common; -use crate::common::*; -use inspect_server::server::CARTESI_MACHINE_RX_BUFFER_LIMIT; - -struct EchoInspect {} - -#[tonic::async_trait] -impl MockInspect for EchoInspect { - async fn inspect_state(&self, payload: Vec<u8>) -> MockInspectResponse { - MockInspectResponse { - reports: vec![Report { payload }], - exception: None, - completion_status: CompletionStatus::Accepted, - } - } -} - -async fn test_get_payload(sent_payload: &str, expected_payload: &str) { - let test_state = TestState::setup(EchoInspect {}).await; - let response = send_get_request(sent_payload) - .await - .expect("failed to obtain response"); - assert_eq!(response.status, "Accepted"); - assert_eq!(response.exception_payload, None); - assert_eq!(response.reports.len(), 1); - let expected_payload = String::from("0x") + &hex::encode(expected_payload); - assert_eq!(response.reports[0].payload, expected_payload); - test_state.teardown().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_simple_payload() { - test_get_payload("hello", "hello").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_payload_with_spaces() { - test_get_payload("hello world", "hello world").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_url_encoded_payload() { - test_get_payload("hello%20world", "hello world").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_payload_with_slashes() { - test_get_payload("user/123/name", "user/123/name").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_payload_with_path_and_query() { - test_get_payload( - "user/data?key=value&key2=value2", - "user/data?key=value&key2=value2", - ) - .await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_raw_json_payload() { - test_get_payload( - r#"{"key": ["value1", "value2"]}"#, - r#"{"key": ["value1", "value2"]}"#, - ) - .await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_empty_payload() { - test_get_payload("", "").await; -} - -async fn test_post_payload(sent_payload: &str, expected_payload: &str) { - let test_state = TestState::setup(EchoInspect {}).await; - let response = send_post_request(sent_payload) - .await - .expect("failed to obtain response"); - assert_eq!(response.status, "Accepted"); - assert_eq!(response.exception_payload, None); - assert_eq!(response.reports.len(), 1); - let expected_payload = String::from("0x") + &hex::encode(expected_payload); -
assert_eq!(response.reports[0].payload, expected_payload); - test_state.teardown().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_empty_payload() { - test_post_payload("", "").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_simple_payload() { - test_post_payload("hello", "hello").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_raw_json_payload() { - test_post_payload( - r#"{"key": ["value1", "value2"]}"#, - r#"{"key": ["value1", "value2"]}"#, - ) - .await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_payload_on_limit() { - let payload = "0".repeat(CARTESI_MACHINE_RX_BUFFER_LIMIT); - test_post_payload(&payload.clone(), &payload.clone()).await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_fails_when_payload_over_limit() { - let payload = "0".repeat(CARTESI_MACHINE_RX_BUFFER_LIMIT + 1); - let test_state = TestState::setup(EchoInspect {}).await; - send_post_request(&payload) - .await - .expect_err("Payload reached size limit"); - test_state.teardown().await; -} diff --git a/offchain/inspect-server/tests/queue.rs b/offchain/inspect-server/tests/queue.rs deleted file mode 100644 index 50a96b256..000000000 --- a/offchain/inspect-server/tests/queue.rs +++ /dev/null @@ -1,242 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod common; -use crate::common::*; - -use futures::stream::FuturesUnordered; -use futures::StreamExt; -use tokio::sync::{mpsc, Mutex}; - -struct SyncInspect { - response_rx: Mutex<mpsc::Receiver<MockInspectResponse>>, -} - -#[tonic::async_trait] -impl MockInspect for SyncInspect { - async fn inspect_state(&self, _: Vec<u8>) -> MockInspectResponse { - self.response_rx.lock().await.recv().await.unwrap() - } -} - -impl SyncInspect { - fn setup() -> (Self, mpsc::Sender<MockInspectResponse>) { - let (response_tx, response_rx) = mpsc::channel(1000); - let mock = SyncInspect { - response_rx: Mutex::new(response_rx), - }; - (mock, response_tx) - } -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_error_when_server_manager_is_down() { - let inspect_server = InspectServerWrapper::start().await; - let (status, message) = send_get_request("hello") - .await - .expect_err("failed to obtain response"); - assert_eq!(status, StatusCode::BAD_GATEWAY); - assert_eq!( - &message, - "Failed to connect to server manager: transport error" - ); - inspect_server.stop().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_succeeds_after_server_manager_starts() { - let inspect_server = InspectServerWrapper::start().await; - let (status, _) = send_get_request("hello") - .await - .expect_err("failed to obtain response"); - assert_eq!(status, StatusCode::BAD_GATEWAY); - let (mock, response_tx) = SyncInspect::setup(); - let server_manager = MockServerManagerWrapper::start(mock).await; - // Add response to queue before sending request - response_tx - .send(MockInspectResponse::default()) - .await - .expect("failed to send response"); - send_get_request("hello") - .await - .expect("failed to obtain response"); - server_manager.stop().await; - inspect_server.stop().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_it_handle_concurrent_inspect_requests() { - let (mock, response_tx) = SyncInspect::setup(); - let state = TestState::setup(mock).await; - // Send multiple concurrent requests - let handlers: Vec<_> = (0..QUEUE_SIZE) - .map(|_| { - tokio::spawn(async { - send_get_request("hello") - .await - .expect("failed to obtain response"); - })
- }) - .collect(); - // Wait until the requests arrive in the inspect-server - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - // Add the responses to the queue - for _ in 0..QUEUE_SIZE { - response_tx - .send(MockInspectResponse::default()) - .await - .expect("failed to send response"); - } - // Check the responses - for handler in handlers { - handler.await.expect("failed to wait handler"); - } - state.teardown().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_it_returns_error_when_queue_is_full() { - let (mock, response_tx) = SyncInspect::setup(); - let state = TestState::setup(mock).await; - // Send concurrent requests to overflow the queue. - // We need two extra requests to overflow the queue because the first message will be - // immediately consumed and removed from the queue. - let mut handlers = FuturesUnordered::new(); - for _ in 0..(QUEUE_SIZE + 2) { - handlers.push(tokio::spawn(send_get_request("hello"))); - } - // Poll the handlers to find the overflow error - let (status, message) = handlers - .next() - .await - .expect("failed to poll") - .expect("failed to join handler") - .expect_err("failed to receive error"); - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!( - message, - String::from("Failed to inspect state: no available capacity") - ); - // Add the responses to the queue - for _ in 0..(QUEUE_SIZE + 1) { - response_tx - .send(MockInspectResponse::default()) - .await - .expect("failed to send response"); - } - // Wait for responses so we don't have zombie threads - while let Some(handler) = handlers.next().await { - let _ = handler.expect("failed to join handler"); - } - state.teardown().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_error_when_server_manager_is_down() { - let inspect_server = InspectServerWrapper::start().await; - let (status, message) = send_post_request("hello") - .await - .expect_err("failed to obtain response"); - assert_eq!(status, StatusCode::BAD_GATEWAY); - assert_eq!( - &message, - "Failed to connect to server manager: transport error" - ); - inspect_server.stop().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_succeeds_after_server_manager_starts() { - let inspect_server = InspectServerWrapper::start().await; - let (status, _) = send_post_request("hello") - .await - .expect_err("failed to obtain response"); - assert_eq!(status, StatusCode::BAD_GATEWAY); - let (mock, response_tx) = SyncInspect::setup(); - let server_manager = MockServerManagerWrapper::start(mock).await; - // Add response to queue before sending request - response_tx - .send(MockInspectResponse::default()) - .await - .expect("failed to send response"); - send_post_request("hello") - .await - .expect("failed to obtain response"); - server_manager.stop().await; - inspect_server.stop().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_it_handle_concurrent_inspect_requests() { - let (mock, response_tx) = SyncInspect::setup(); - let state = TestState::setup(mock).await; - // Send multiple concurrent requests - let handlers: Vec<_> = (0..QUEUE_SIZE) - .map(|_| { - tokio::spawn(async { - send_post_request("hello") - .await - .expect("failed to obtain response"); - }) - }) - .collect(); - // Wait until the requests arrive in the inspect-server - tokio::time::sleep(std::time::Duration::from_millis(100)).await; - // Add the responses to the queue - for _ in 0..QUEUE_SIZE { - response_tx - .send(MockInspectResponse::default()) - .await - .expect("failed to send
response"); - } - // Check the responses - for handler in handlers { - handler.await.expect("failed to wait handler"); - } - state.teardown().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_it_returns_error_when_queue_is_full() { - let (mock, response_tx) = SyncInspect::setup(); - let state = TestState::setup(mock).await; - // Send concurrent requests to overflow the queue. - // We need to 2 extra requests to overflow the queue because the first message will be - // imediatelly consumed and removed from the queue. - let mut handlers = FuturesUnordered::new(); - for _ in 0..(QUEUE_SIZE + 2) { - handlers.push(tokio::spawn(send_post_request("hello"))); - } - // Poll the handlers to find the overflow error - let (status, message) = handlers - .next() - .await - .expect("failed to poll") - .expect("failed to join handler") - .expect_err("failed to receive error"); - assert_eq!(status, StatusCode::BAD_REQUEST); - assert_eq!( - message, - String::from("Failed to inspect state: no available capacity") - ); - // Add the responses to the queue - for _ in 0..(QUEUE_SIZE + 1) { - response_tx - .send(MockInspectResponse::default()) - .await - .expect("failed to send response"); - } - // Wait for responses so we don't have zombie threads - while let Some(handler) = handlers.next().await { - let _ = handler.expect("failed to join handler"); - } - state.teardown().await; -} diff --git a/offchain/inspect-server/tests/response.rs b/offchain/inspect-server/tests/response.rs deleted file mode 100644 index a15ee596a..000000000 --- a/offchain/inspect-server/tests/response.rs +++ /dev/null @@ -1,176 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod common; -use crate::common::*; - -struct FixedResponseInspect { - response: MockInspectResponse, -} - -#[tonic::async_trait] -impl MockInspect for FixedResponseInspect { - async fn inspect_state(&self, _: Vec) -> MockInspectResponse { - self.response.clone() - } -} - -async fn test_get_response(sent: MockInspectResponse, expected_status: &str) { - let mock = FixedResponseInspect { - response: sent.clone(), - }; - let state = TestState::setup(mock).await; - let response = send_get_request("") - .await - .expect("failed to obtain response"); - assert_eq!(&response.status, expected_status); - assert_eq!( - response.exception_payload, - sent.exception.as_ref().map(hex_to_bin) - ); - assert_eq!(response.reports.len(), sent.reports.len()); - for (received, sent) in response.reports.iter().zip(sent.reports) { - assert_eq!(received.payload, hex_to_bin(&sent.payload)); - } - assert_eq!(response.processed_input_count, PROCESSED_INPUT_COUNT); - state.teardown().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_response_with_no_reports() { - let response = MockInspectResponse { - reports: vec![], - exception: None, - completion_status: CompletionStatus::Accepted, - }; - test_get_response(response, "Accepted").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_response_with_single_report() { - let response = MockInspectResponse { - reports: vec![Report { - payload: vec![1, 2, 3], - }], - exception: None, - completion_status: CompletionStatus::Accepted, - }; - test_get_response(response, "Accepted").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_response_with_multiple_reports() { - let reports = vec![ - Report { - payload: vec![1, 2, 3], - }, - Report { - payload: vec![4, 5, 6], - }, - Report { - payload: vec![7, 8, 9], - 
}, - ]; - let response = MockInspectResponse { - reports, - exception: None, - completion_status: CompletionStatus::Accepted, - }; - test_get_response(response, "Accepted").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_get_response_with_reports_and_exception() { - let response = MockInspectResponse { - reports: vec![Report { - payload: vec![1, 2, 3], - }], - exception: Some(vec![4, 5, 6]), - completion_status: CompletionStatus::Exception, - }; - test_get_response(response, "Exception").await; -} - -async fn test_post_response(sent: MockInspectResponse, expected_status: &str) { - let mock = FixedResponseInspect { - response: sent.clone(), - }; - let state = TestState::setup(mock).await; - let response = send_post_request("") - .await - .expect("failed to obtain response"); - assert_eq!(&response.status, expected_status); - assert_eq!( - response.exception_payload, - sent.exception.as_ref().map(hex_to_bin) - ); - assert_eq!(response.reports.len(), sent.reports.len()); - for (received, sent) in response.reports.iter().zip(sent.reports) { - assert_eq!(received.payload, hex_to_bin(&sent.payload)); - } - assert_eq!(response.processed_input_count, PROCESSED_INPUT_COUNT); - state.teardown().await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_response_with_no_reports() { - let response = MockInspectResponse { - reports: vec![], - exception: None, - completion_status: CompletionStatus::Accepted, - }; - test_post_response(response, "Accepted").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_response_with_single_report() { - let response = MockInspectResponse { - reports: vec![Report { - payload: vec![1, 2, 3], - }], - exception: None, - completion_status: CompletionStatus::Accepted, - }; - test_post_response(response, "Accepted").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_response_with_multiple_reports() { - let reports = vec![ - Report { - payload: vec![1, 2, 3], - }, - Report { - payload: vec![4, 5, 6], - }, - Report { - payload: vec![7, 8, 9], - }, - ]; - let response = MockInspectResponse { - reports, - exception: None, - completion_status: CompletionStatus::Accepted, - }; - test_post_response(response, "Accepted").await; -} - -#[tokio::test] -#[serial_test::serial] -async fn test_post_response_with_reports_and_exception() { - let response = MockInspectResponse { - reports: vec![Report { - payload: vec![1, 2, 3], - }], - exception: Some(vec![4, 5, 6]), - completion_status: CompletionStatus::Exception, - }; - test_post_response(response, "Exception").await; -} diff --git a/offchain/log/Cargo.toml b/offchain/log/Cargo.toml deleted file mode 100644 index b8b1f33d8..000000000 --- a/offchain/log/Cargo.toml +++ /dev/null @@ -1,13 +0,0 @@ -[package] -name = "log" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -clap = { workspace = true, features = ["derive", "env"] } -tracing.workspace = true -tracing-subscriber = { workspace = true, features = ["env-filter"] } - -[build-dependencies] -built = { workspace = true, features = ["git2"] } diff --git a/offchain/log/build.rs b/offchain/log/build.rs deleted file mode 100644 index 5b20b4b6d..000000000 --- a/offchain/log/build.rs +++ /dev/null @@ -1,7 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub(crate) fn main() { - built::write_built_file() - .expect("Failed to acquire build-time information"); -} diff --git a/offchain/redacted/Cargo.toml 
b/offchain/redacted/Cargo.toml deleted file mode 100644 index 6592c3db4..000000000 --- a/offchain/redacted/Cargo.toml +++ /dev/null @@ -1,8 +0,0 @@ -[package] -name = "redacted" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -url.workspace = true diff --git a/offchain/rollups-events/Cargo.toml b/offchain/rollups-events/Cargo.toml deleted file mode 100644 index 7eda7413b..000000000 --- a/offchain/rollups-events/Cargo.toml +++ /dev/null @@ -1,35 +0,0 @@ -[package] -name = "rollups-events" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -redacted = { path = "../redacted" } - -backoff = { workspace = true, features = ["tokio"] } -base64.workspace = true -clap = { workspace = true, features = ["derive", "env"] } -hex.workspace = true -prometheus-client.workspace = true -serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "time", "rt-multi-thread"] } -tracing.workspace = true - -redis = { workspace = true, features = [ - "streams", - "tokio-comp", - "connection-manager", - "tls-native-tls", - "tokio-native-tls-comp", - "cluster", - "cluster-async" -] } - -[dev-dependencies] -env_logger.workspace = true -test-log = { workspace = true, features = ["trace"] } -testcontainers.workspace = true -tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/offchain/rollups-events/README.md b/offchain/rollups-events/README.md deleted file mode 100644 index 4de838318..000000000 --- a/offchain/rollups-events/README.md +++ /dev/null @@ -1,8 +0,0 @@ -# Rollups Events - -This crate works as an abstraction layer for producing and consuming Cartesi Rollups events. -Currently, it uses Redis Streams as the event broker and defines the following event streams: - -- `rollups-inputs`, for exchanging *Input* events; -- `rollups-outputs`, for exchanging *Output* events; -- `rollups-claims`, for exchanging *Claim* events. diff --git a/offchain/rollups-events/src/broker/indexer.rs b/offchain/rollups-events/src/broker/indexer.rs deleted file mode 100644 index 01422167d..000000000 --- a/offchain/rollups-events/src/broker/indexer.rs +++ /dev/null @@ -1,106 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -//! This module is an indexer-specific extension for the broker -//! -//! It would be too complex to implement the indexer extension as a generic broker method. -//! Instead, we decided to implement the extension that we need for the indexer as a submodule. -//! This extension should be in this crate because it accesses the Redis interface directly. -//! (All Redis interaction should be hidden in this crate.) 
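The core trick behind this extension is a single blocking `XREAD` over both streams at once; the consumer then checks the inputs entry in the reply first, which is what gives input events priority. A minimal sketch of that idea with the `redis` crate follows (assuming the `streams`, `connection-manager`, and `tokio-comp` features; the stream keys and the local endpoint are illustrative):

```rust
use redis::aio::ConnectionManager;
use redis::streams::{StreamReadOptions, StreamReadReply};
use redis::AsyncCommands;

#[tokio::main]
async fn main() -> redis::RedisResult<()> {
    let client = redis::Client::open("redis://127.0.0.1:6379")?;
    let mut conn = ConnectionManager::new(client).await?;
    // One XREAD over both streams: block up to 1s, return at most one
    // entry per stream. Priority is applied by the reader, not by Redis.
    let opts = StreamReadOptions::default().count(1).block(1000);
    let reply: StreamReadReply = conn
        .xread_options(
            &["rollups-inputs", "rollups-outputs"], // stream keys
            &["0", "0"],                            // last-consumed ids
            &opts,
        )
        .await?;
    // Inspect the inputs stream first, mirroring indexer_consume.
    for stream in reply.keys {
        println!("{}: {} new event(s)", stream.key, stream.ids.len());
    }
    Ok(())
}
```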
-use backoff::future::retry; -use redis::streams::{StreamReadOptions, StreamReadReply}; -use redis::AsyncCommands; -use snafu::ResultExt; - -use super::ConnectionSnafu; -use crate::{ - Broker, BrokerError, BrokerStream, DAppMetadata, Event, RollupsInput, - RollupsInputsStream, RollupsOutput, RollupsOutputsStream, INITIAL_ID, -}; - -#[derive(Debug, Clone, Eq, PartialEq)] -pub enum IndexerEvent { - Input(Event<RollupsInput>), - Output(Event<RollupsOutput>), -} - -#[derive(Debug)] -pub struct IndexerState { - inputs_last_id: String, - outputs_last_id: String, - inputs_stream: RollupsInputsStream, - outputs_stream: RollupsOutputsStream, -} - -impl IndexerState { - pub fn new(dapp_metadata: &DAppMetadata) -> Self { - Self { - inputs_last_id: INITIAL_ID.to_owned(), - outputs_last_id: INITIAL_ID.to_owned(), - inputs_stream: RollupsInputsStream::new(dapp_metadata), - outputs_stream: RollupsOutputsStream::new(dapp_metadata), - } - } -} - -impl Broker { - /// Consume an event from the Input stream and, if there is none, - /// consume from the Output stream. This is a blocking operation. - /// Return IndexerEvent::Input if present, or IndexerEvent::Output otherwise - #[tracing::instrument(level = "trace", skip_all)] - pub async fn indexer_consume( - &self, - state: &mut IndexerState, - ) -> Result<IndexerEvent, BrokerError> { - let input_stream_key = state.inputs_stream.key(); - let output_stream_key = state.outputs_stream.key(); - let mut reply = retry(self.backoff.clone(), || async { - let stream_keys = [&input_stream_key, &output_stream_key]; - let last_consumed_ids = - [&state.inputs_last_id, &state.outputs_last_id]; - tracing::trace!( - ?stream_keys, - ?last_consumed_ids, - "consuming event" - ); - let opts = StreamReadOptions::default() - .count(1) - .block(self.consume_timeout); - let reply: StreamReadReply = self - .connection - .clone() - .xread_options(&stream_keys, &last_consumed_ids, &opts) - .await?; - Ok(reply) - }) - .await - .context(ConnectionSnafu)?; - - let input_stream_id = reply - .keys - .iter_mut() - .find(|stream| stream.key == input_stream_key) - .and_then(|stream| stream.ids.pop()); - if let Some(stream_id) = input_stream_id { - tracing::trace!("found input event; parsing it"); - let event: Event<RollupsInput> = stream_id.try_into()?; - state.inputs_last_id = event.id.clone(); - return Ok(IndexerEvent::Input(event)); - } - - let output_stream_id = reply - .keys - .iter_mut() - .find(|stream| stream.key == output_stream_key) - .and_then(|stream| stream.ids.pop()); - if let Some(stream_id) = output_stream_id { - tracing::trace!("found output event; parsing it"); - let event: Event<RollupsOutput> = stream_id.try_into()?; - state.outputs_last_id = event.id.clone(); - return Ok(IndexerEvent::Output(event)); - } - - tracing::trace!("indexer consume timed out"); - Err(BrokerError::ConsumeTimeout) - } -} diff --git a/offchain/rollups-events/src/lib.rs b/offchain/rollups-events/src/lib.rs deleted file mode 100644 index 8c75b8ff8..000000000 --- a/offchain/rollups-events/src/lib.rs +++ /dev/null @@ -1,26 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod broker; -mod common; -mod rollups_claims; -mod rollups_inputs; -mod rollups_outputs; -mod rollups_stream; - -pub use broker::{ - indexer, Broker, BrokerCLIConfig, BrokerConfig, BrokerEndpoint, - BrokerError, BrokerStream, Event, RedactedUrl, Url, INITIAL_ID, -}; -pub use common::{Address, Hash, Payload, ADDRESS_SIZE, HASH_SIZE}; -pub use rollups_claims::{RollupsClaim, RollupsClaimsStream}; -pub use rollups_inputs::{ - InputMetadata,
RollupsAdvanceStateInput, RollupsData, RollupsInput, - RollupsInputsStream, -}; -pub use rollups_outputs::{ - RollupsAdvanceResult, RollupsCompletionStatus, RollupsNotice, - RollupsOutput, RollupsOutputEnum, RollupsOutputValidityProof, - RollupsOutputsStream, RollupsProof, RollupsReport, RollupsVoucher, -}; -pub use rollups_stream::{DAppMetadata, DAppMetadataCLIConfig}; diff --git a/offchain/rollups-events/src/rollups_inputs.rs b/offchain/rollups-events/src/rollups_inputs.rs deleted file mode 100644 index a5069414b..000000000 --- a/offchain/rollups-events/src/rollups_inputs.rs +++ /dev/null @@ -1,69 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use serde::{Deserialize, Serialize}; - -use crate::{rollups_stream::decl_broker_stream, Address, Hash, Payload}; - -decl_broker_stream!(RollupsInputsStream, RollupsInput, "rollups-inputs"); - -/// Cartesi Rollups event -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsInput { - /// Id of the parent of the event - /// This field must be supplied by the producer of the event. - /// Notice that the parent might not be the latest event in the stream; - /// this happens during a reorg. - /// The parent of the first event should be INITIAL_ID. - pub parent_id: String, - - /// Epoch index - pub epoch_index: u64, - - /// Number of sent inputs for all epochs - pub inputs_sent_count: u64, - - /// Data that depends on the kind of event - pub data: RollupsData, -} - -/// Rollups data enumeration -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub enum RollupsData { - /// Input that advances the Cartesi Rollups epoch - AdvanceStateInput(RollupsAdvanceStateInput), - - /// End of a Cartesi Rollups epoch - FinishEpoch {}, -} - -/// Input that advances the Cartesi Rollups epoch -#[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsAdvanceStateInput { - /// Information sent via the input metadata memory range - pub metadata: InputMetadata, - - /// Payload of the input - pub payload: Payload, - - /// Transaction hash - pub tx_hash: Hash, -} - -#[derive(Default, Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct InputMetadata { - /// Address of the message sender - pub msg_sender: Address, - - /// Block number when the input was posted - pub block_number: u64, - - /// Timestamp of the block - pub timestamp: u64, - - /// Epoch index - pub epoch_index: u64, - - /// Input index in epoch - pub input_index: u64, -} diff --git a/offchain/rollups-events/src/rollups_outputs.rs b/offchain/rollups-events/src/rollups_outputs.rs deleted file mode 100644 index 57c8bc0c3..000000000 --- a/offchain/rollups-events/src/rollups_outputs.rs +++ /dev/null @@ -1,88 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -//! For more information about each type, see the GraphQL API definition in
`offchain/graphql-server/schema.graphql` -use serde::{Deserialize, Serialize}; - -use crate::{rollups_stream::decl_broker_stream, Address, Hash, Payload}; - -decl_broker_stream!(RollupsOutputsStream, RollupsOutput, "rollups-outputs"); - -/// Cartesi output -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub enum RollupsOutput { - AdvanceResult(RollupsAdvanceResult), - Voucher(RollupsVoucher), - Notice(RollupsNotice), - Report(RollupsReport), - Proof(RollupsProof), -} - -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsAdvanceResult { - pub input_index: u64, - pub status: RollupsCompletionStatus, -} - -/// Based on CompletionStatus from the server-manager gRPC interface -#[derive(Debug, Clone, Eq, PartialEq, Serialize, Deserialize)] -pub enum RollupsCompletionStatus { - Accepted, - Rejected, - Exception, - MachineHalted, - CycleLimitExceeded, - TimeLimitExceeded, - PayloadLengthLimitExceeded, -} - -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsVoucher { - pub index: u64, - pub input_index: u64, - pub destination: Address, - pub payload: Payload, -} - -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsNotice { - pub index: u64, - pub input_index: u64, - pub payload: Payload, -} - -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsReport { - pub index: u64, - pub input_index: u64, - pub payload: Payload, -} - -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub enum RollupsOutputEnum { - #[default] - Voucher, - Notice, -} - -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsProof { - pub input_index: u64, - pub output_index: u64, - pub output_enum: RollupsOutputEnum, - pub validity: RollupsOutputValidityProof, - pub context: Payload, -} - -#[derive(Debug, Clone, Default, Eq, PartialEq, Serialize, Deserialize)] -pub struct RollupsOutputValidityProof { - pub input_index_within_epoch: u64, - pub output_index_within_input: u64, - pub output_hashes_root_hash: Hash, - pub vouchers_epoch_root_hash: Hash, - pub notices_epoch_root_hash: Hash, - pub machine_state_hash: Hash, - pub output_hash_in_output_hashes_siblings: Vec<Hash>, - pub output_hashes_in_epoch_siblings: Vec<Hash>, -} diff --git a/offchain/rollups-events/tests/indexer.rs b/offchain/rollups-events/tests/indexer.rs deleted file mode 100644 index 1d5f84171..000000000 --- a/offchain/rollups-events/tests/indexer.rs +++ /dev/null @@ -1,224 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use backoff::ExponentialBackoff; -use rollups_events::indexer::{IndexerEvent, IndexerState}; -use rollups_events::{ - Address, Broker, BrokerConfig, BrokerEndpoint, BrokerError, BrokerStream, - DAppMetadata, Event, Hash, RedactedUrl, RollupsAdvanceStateInput, - RollupsData, RollupsInput, RollupsInputsStream, RollupsOutput, - RollupsOutputsStream, Url, -}; -use testcontainers::{ - clients::Cli, core::WaitFor, images::generic::GenericImage, Container, -}; - -pub const CONSUME_TIMEOUT: usize = 10; -pub const CHAIN_ID: u64 = 99; -pub const DAPP_ADDRESS: Address = Address::new([0xfa; 20]); - -pub struct TestState<'d> { - _node: Container<'d, GenericImage>, - redis_endpoint: RedactedUrl, -} - -impl TestState<'_> { - pub async fn setup(docker: &Cli) -> TestState { - let image = GenericImage::new("redis", "6.2").with_wait_for( - WaitFor::message_on_stdout("Ready to accept
connections"), - ); - let node = docker.run(image); - let port = node.get_host_port_ipv4(6379); - let redis_endpoint = Url::parse(&format!("redis://127.0.0.1:{}", port)) - .map(RedactedUrl::new) - .expect("failed to parse Redis Url"); - TestState { - _node: node, - redis_endpoint, - } - } - - pub async fn create_broker(&self) -> Broker { - let backoff = ExponentialBackoff::default(); - let config = BrokerConfig { - redis_endpoint: BrokerEndpoint::Single(self.redis_endpoint.clone()), - consume_timeout: CONSUME_TIMEOUT, - backoff, - }; - Broker::new(config) - .await - .expect("failed to initialize broker") - } -} - -#[test_log::test(tokio::test)] -async fn it_times_out_when_no_indexer_event_is_produced() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - let broker = state.create_broker().await; - let mut indexer_state = IndexerState::new(&dapp_metadata()); - let err = broker - .indexer_consume(&mut indexer_state) - .await - .expect_err("consume event worked but it should have failed"); - assert!(matches!(err, BrokerError::ConsumeTimeout)); -} - -#[test_log::test(tokio::test)] -async fn it_consumes_input_events() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - let mut broker = state.create_broker().await; - // Produce input events - let inputs = generate_inputs(); - let metadata = dapp_metadata(); - let stream = RollupsInputsStream::new(&metadata); - produce_all(&mut broker, &stream, &inputs).await; - // Consume indexer events - let consumed_events = - consume_all(&mut broker, &metadata, inputs.len()).await; - for (event, input) in consumed_events.iter().zip(&inputs) { - assert!(matches!(event, - IndexerEvent::Input( - Event { - payload, - .. - } - ) - if payload == input - )); - } -} - -#[test_log::test(tokio::test)] -async fn it_consumes_output_events() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - let mut broker = state.create_broker().await; - // Produce output events - let outputs = generate_outputs(); - let metadata = dapp_metadata(); - let stream = RollupsOutputsStream::new(&metadata); - produce_all(&mut broker, &stream, &outputs).await; - // Consume indexer events - let consumed_events = - consume_all(&mut broker, &metadata, outputs.len()).await; - for (event, output) in consumed_events.iter().zip(&outputs) { - assert!(matches!(event, - IndexerEvent::Output( - Event { - payload, - .. - } - ) - if payload == output - )); - } -} - -#[test_log::test(tokio::test)] -async fn it_consumes_inputs_before_outputs() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - let mut broker = state.create_broker().await; - // First, produce output events - let outputs = generate_outputs(); - let metadata = dapp_metadata(); - let outputs_stream = RollupsOutputsStream::new(&metadata); - produce_all(&mut broker, &outputs_stream, &outputs).await; - // Then, produce input events - let inputs = generate_inputs(); - let inputs_stream = RollupsInputsStream::new(&metadata); - produce_all(&mut broker, &inputs_stream, &inputs).await; - // Finally, consume indexer events - let consumed_events = - consume_all(&mut broker, &metadata, outputs.len() + inputs.len()).await; - for (i, input) in inputs.iter().enumerate() { - assert!(matches!(&consumed_events[i], - IndexerEvent::Input( - Event { - payload, - .. 
- } - ) - if payload == input - )); - } - for (i, output) in outputs.iter().enumerate() { - assert!(matches!(&consumed_events[inputs.len() + i], - IndexerEvent::Output( - Event { - payload, - .. - } - ) - if payload == output - )); - } -} - -fn dapp_metadata() -> DAppMetadata { - DAppMetadata { - chain_id: CHAIN_ID, - dapp_address: DAPP_ADDRESS.to_owned(), - } -} - -fn generate_outputs() -> Vec<RollupsOutput> { - vec![ - RollupsOutput::Voucher(Default::default()), - RollupsOutput::Notice(Default::default()), - RollupsOutput::Report(Default::default()), - ] -} - -fn generate_inputs() -> Vec<RollupsInput> { - vec![ - RollupsInput { - parent_id: "".to_owned(), - epoch_index: 0, - inputs_sent_count: 1, - data: RollupsData::AdvanceStateInput(RollupsAdvanceStateInput { - metadata: Default::default(), - payload: Default::default(), - tx_hash: Hash::default(), - }), - }, - RollupsInput { - parent_id: "".to_owned(), - epoch_index: 0, - inputs_sent_count: 1, - data: RollupsData::FinishEpoch {}, - }, - ] -} - -async fn produce_all<S: BrokerStream>( - broker: &mut Broker, - stream: &S, - payloads: &[S::Payload], -) { - for payload in payloads { - broker - .produce(stream, payload.clone()) - .await - .expect("failed to produce"); - } -} - -async fn consume_all( - broker: &mut Broker, - dapp_metadata: &DAppMetadata, - n: usize, -) -> Vec<IndexerEvent> { - let mut state = IndexerState::new(dapp_metadata); - let mut payloads = vec![]; - for _ in 0..n { - let payload = broker - .indexer_consume(&mut state) - .await - .expect("failed to consume indexer payload"); - payloads.push(payload); - } - payloads -} diff --git a/offchain/rollups-events/tests/integration.rs b/offchain/rollups-events/tests/integration.rs deleted file mode 100644 index 6bfaa9cc8..000000000 --- a/offchain/rollups-events/tests/integration.rs +++ /dev/null @@ -1,288 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use backoff::ExponentialBackoff; -use redis::aio::ConnectionManager; -use redis::streams::StreamRangeReply; -use redis::{AsyncCommands, Client}; -use serde::{Deserialize, Serialize}; -use testcontainers::{ - clients::Cli, core::WaitFor, images::generic::GenericImage, Container, -}; - -use rollups_events::{ - Broker, BrokerConfig, BrokerEndpoint, BrokerError, BrokerStream, - RedactedUrl, Url, INITIAL_ID, -}; - -const STREAM_KEY: &'static str = "test-stream"; -const CONSUME_TIMEOUT: usize = 10; - -struct TestState<'d> { - _node: Container<'d, GenericImage>, - redis_endpoint: RedactedUrl, - conn: ConnectionManager, - backoff: ExponentialBackoff, -} - -impl TestState<'_> { - async fn setup(docker: &Cli) -> TestState { - let image = GenericImage::new("redis", "6.2").with_wait_for( - WaitFor::message_on_stdout("Ready to accept connections"), - ); - let node = docker.run(image); - let port = node.get_host_port_ipv4(6379); - let redis_endpoint = Url::parse(&format!("redis://127.0.0.1:{}", port)) - .map(RedactedUrl::new) - .expect("failed to parse Redis Url"); - let backoff = ExponentialBackoff::default(); - - let client = Client::open(redis_endpoint.inner().as_str()) - .expect("failed to create client"); - let conn = ConnectionManager::new(client) - .await - .expect("failed to create connection"); - - TestState { - _node: node, - redis_endpoint, - conn, - backoff, - } - } - - async fn create_broker(&self) -> Broker { - let config = BrokerConfig { - redis_endpoint: BrokerEndpoint::Single(self.redis_endpoint.clone()), - backoff: self.backoff.clone(), - consume_timeout: CONSUME_TIMEOUT, - }; - Broker::new(config) - .await -
.expect("failed to initialize broker") - } -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -struct MockPayload { - data: String, -} - -struct MockStream {} - -impl BrokerStream for MockStream { - type Payload = MockPayload; - - fn key(&self) -> &str { - STREAM_KEY - } -} - -#[test_log::test(tokio::test)] -async fn test_it_produces_events() { - let docker = Cli::default(); - let mut state = TestState::setup(&docker).await; - let mut broker = state.create_broker().await; - // Produce events using the Broker struct - const N: usize = 3; - let mut ids = vec![]; - for i in 0..N { - let data = MockPayload { - data: i.to_string(), - }; - let id = broker - .produce(&MockStream {}, data) - .await - .expect("failed to produce"); - ids.push(id); - } - // Check the events directly in Redis - let reply: StreamRangeReply = state - .conn - .xrange(STREAM_KEY, "-", "+") - .await - .expect("failed to read"); - assert_eq!(reply.ids.len(), 3); - for i in 0..N { - let expected = format!(r#"{{"data":"{}"}}"#, i); - assert_eq!(reply.ids[i].id, ids[i]); - assert_eq!(reply.ids[i].get::("payload").unwrap(), expected); - } -} - -#[test_log::test(tokio::test)] -async fn test_it_peeks_in_stream_with_no_events() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - let mut broker = state.create_broker().await; - let event = broker - .peek_latest(&MockStream {}) - .await - .expect("failed to peek"); - assert!(matches!(event, None)); -} - -#[test_log::test(tokio::test)] -async fn test_it_peeks_in_stream_with_multiple_events() { - let docker = Cli::default(); - let mut state = TestState::setup(&docker).await; - // Produce multiple events directly in Redis - const N: usize = 3; - for i in 0..N { - let id = format!("1-{}", i); - let data = format!(r#"{{"data":"{}"}}"#, i); - let _: String = state - .conn - .xadd(STREAM_KEY, id, &[("payload", data)]) - .await - .expect("failed to add events"); - } - // Peek the event using the Broker struct - let mut broker = state.create_broker().await; - let event = broker - .peek_latest(&MockStream {}) - .await - .expect("failed to peek"); - if let Some(event) = event { - assert_eq!(&event.id, "1-2"); - assert_eq!(&event.payload.data, "2"); - } else { - panic!("expected some event"); - } -} - -#[test_log::test(tokio::test)] -async fn test_it_fails_to_peek_event_in_invalid_format() { - let docker = Cli::default(); - let mut state = TestState::setup(&docker).await; - // Produce event directly in Redis - let _: String = state - .conn - .xadd(STREAM_KEY, "1-0", &[("wrong_field", "0")]) - .await - .expect("failed to add events"); - // Peek the event using the Broker struct - let mut broker = state.create_broker().await; - let err = broker - .peek_latest(&MockStream {}) - .await - .expect_err("failed to get error"); - assert!(matches!(err, BrokerError::InvalidEvent)); -} - -#[test_log::test(tokio::test)] -async fn test_it_fails_to_peek_event_with_invalid_data_encoding() { - let docker = Cli::default(); - let mut state = TestState::setup(&docker).await; - // Produce event directly in Redis - let _: String = state - .conn - .xadd(STREAM_KEY, "1-0", &[("payload", "not a json")]) - .await - .expect("failed to add events"); - // Peek the event using the Broker struct - let mut broker = state.create_broker().await; - let err = broker - .peek_latest(&MockStream {}) - .await - .expect_err("failed to get error"); - assert!(matches!(err, BrokerError::InvalidPayload { .. 
})); -} - -#[test_log::test(tokio::test)] -async fn test_it_consumes_events() { - let docker = Cli::default(); - let mut state = TestState::setup(&docker).await; - // Produce multiple events directly in Redis - const N: usize = 3; - for i in 0..N { - let id = format!("1-{}", i); - let data = format!(r#"{{"data":"{}"}}"#, i); - let _: String = state - .conn - .xadd(STREAM_KEY, id, &[("payload", data)]) - .await - .expect("failed to add events"); - } - // Consume events using the Broker struct - let mut broker = state.create_broker().await; - let mut last_id = INITIAL_ID.to_owned(); - for i in 0..N { - let event = broker - .consume_blocking(&MockStream {}, &last_id) - .await - .expect("failed to consume"); - assert_eq!(event.id, format!("1-{}", i)); - assert_eq!(event.payload.data, i.to_string()); - last_id = event.id; - } -} - -#[test_log::test(tokio::test)] -async fn test_it_blocks_until_event_is_produced() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - // Spawn another thread that sends the event after a few ms - let handler = { - let mut conn = state.conn.clone(); - tokio::spawn(async move { - let duration = std::time::Duration::from_millis(10); - tokio::time::sleep(duration).await; - let _: String = conn - .xadd(STREAM_KEY, "1-0", &[("payload", r#"{"data":"0"}"#)]) - .await - .expect("failed to write event"); - }) - }; - // In the main thread, wait for the expected event - let mut broker = state.create_broker().await; - let event = broker - .consume_blocking(&MockStream {}, "0") - .await - .expect("failed to consume event"); - assert_eq!(event.id, "1-0"); - assert_eq!(event.payload.data, "0"); - handler.await.expect("failed to wait handler"); -} - -#[test_log::test(tokio::test)] -async fn test_it_consumes_events_without_blocking() { - let docker = Cli::default(); - let mut state = TestState::setup(&docker).await; - // Produce multiple events directly in Redis - const N: usize = 3; - for i in 0..N { - let id = format!("1-{}", i); - let data = format!(r#"{{"data":"{}"}}"#, i); - let _: String = state - .conn - .xadd(STREAM_KEY, id, &[("payload", data)]) - .await - .expect("failed to add events"); - } - // Consume events using the Broker struct - let mut broker = state.create_broker().await; - let mut last_id = INITIAL_ID.to_owned(); - for i in 0..N { - let event = broker - .consume_nonblocking(&MockStream {}, &last_id) - .await - .expect("failed to consume") - .expect("expected event, got None"); - assert_eq!(event.id, format!("1-{}", i)); - assert_eq!(event.payload.data, i.to_string()); - last_id = event.id; - } -} - -#[test_log::test(tokio::test)] -async fn test_it_does_not_block_when_consuming_empty_stream() { - let docker = Cli::default(); - let state = TestState::setup(&docker).await; - let mut broker = state.create_broker().await; - let event = broker - .consume_nonblocking(&MockStream {}, INITIAL_ID) - .await - .expect("failed to peek"); - assert!(matches!(event, None)); -} diff --git a/offchain/rollups-http-client/Cargo.toml b/offchain/rollups-http-client/Cargo.toml deleted file mode 100644 index 0e2364060..000000000 --- a/offchain/rollups-http-client/Cargo.toml +++ /dev/null @@ -1,11 +0,0 @@ -[package] -name = "rollups-http-client" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -hyper = { workspace = true, features = ["http1", "runtime", "client"] } -serde = { workspace = true, features = ["derive"] } -serde_json.workspace = true -tracing.workspace = true diff --git 
a/offchain/rollups-http-client/README.md b/offchain/rollups-http-client/README.md deleted file mode 100644 index 8a5710642..000000000 --- a/offchain/rollups-http-client/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Rollups HTTP Client - -Library crate that offers an HTTP client compatible with the Rollups HTTP server API (see [Rollups OpenAPI interfaces](https://github.com/cartesi/openapi-interfaces/blob/main/rollup.yaml)). diff --git a/offchain/rollups-http-client/src/client.rs b/offchain/rollups-http-client/src/client.rs deleted file mode 100644 index 63165091e..000000000 --- a/offchain/rollups-http-client/src/client.rs +++ /dev/null @@ -1,212 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::rollup::{ - AdvanceRequest, Exception, IndexResponse, InspectRequest, Notice, Report, - RollupRequest, RollupResponse, Voucher, -}; -use serde::{Deserialize, Serialize}; -use std::io::ErrorKind; - -#[derive(Debug, Serialize, Deserialize)] -#[serde(tag = "request_type")] -enum RollupHttpRequest { - #[serde(rename = "advance_state")] - Advance { data: AdvanceRequest }, - #[serde(rename = "inspect_state")] - Inspect { data: InspectRequest }, -} - -pub async fn send_voucher(rollup_http_server_addr: &str, voucher: Voucher) { - tracing::debug!("sending voucher request to {}", rollup_http_server_addr); - let client = hyper::Client::new(); - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(rollup_http_server_addr.to_string() + "/voucher") - .body(hyper::Body::from(serde_json::to_string(&voucher).unwrap())) - .expect("voucher request"); - match client.request(req).await { - Ok(res) => { - let id_response = serde_json::from_slice::<IndexResponse>( - &hyper::body::to_bytes(res) - .await - .expect("error in voucher in response handling"), - ); - tracing::debug!("voucher generated: {:?}", &id_response); - } - Err(e) => { - tracing::error!( - "failed to send voucher request to rollup http server: {}", - e - ); - } - } -} - -pub async fn send_notice(rollup_http_server_addr: &str, notice: Notice) { - tracing::debug!("sending notice request to {}", rollup_http_server_addr); - let client = hyper::Client::new(); - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(rollup_http_server_addr.to_string() + "/notice") - .body(hyper::Body::from(serde_json::to_string(&notice).unwrap())) - .expect("notice request"); - match client.request(req).await { - Ok(res) => { - let id_response = serde_json::from_slice::<IndexResponse>( - &hyper::body::to_bytes(res) - .await - .expect("error in notice id response handling"), - ); - tracing::debug!("notice generated: {:?}", &id_response); - } - Err(e) => { - tracing::error!( - "failed to send notice request to rollup http server: {}", - e - ); - } - } -} - -pub async fn send_report(rollup_http_server_addr: &str, report: Report) { - tracing::debug!("sending report request to {}", rollup_http_server_addr); - let client = hyper::Client::new(); - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(rollup_http_server_addr.to_string() + "/report") - .body(hyper::Body::from(serde_json::to_string(&report).unwrap())) - .expect("report request"); - if let Err(e) = client.request(req).await { - tracing::error!( - "failed to send report request to rollup http server: {}", - e - ); - } -} - -pub async fn
throw_exception( - rollup_http_server_addr: &str, - exception: Exception, -) { - tracing::debug!( - "throwing exception request to {}", - rollup_http_server_addr - ); - let client = hyper::Client::new(); - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(rollup_http_server_addr.to_string() + "/exception") - .body(hyper::Body::from( - serde_json::to_string(&exception).unwrap(), - )) - .expect("exception request"); - if let Err(e) = client.request(req).await { - tracing::error!( - "failed to send exception throw request to rollup http server : {}", - e - ); - } - // Here it doesn't matter what application does, as server manager - // will terminate machine execution - #[cfg(target_arch = "riscv64")] - { - panic!("exception happened due to exception parameter!"); - } -} - -pub async fn send_finish_request( - rollup_http_server_addr: &str, - result: &RollupResponse, -) -> Result<RollupRequest, std::io::Error> { - // Application advance request resulting status - let status = match result { - RollupResponse::Finish(value) => { - if *value { - "accept" - } else { - "reject" - } - } - }; - // Reconstruct http dispatcher finish target endpoint - let rollup_http_server_endpoint = - format!("{}/finish", rollup_http_server_addr); - tracing::debug!( - "sending finish request to {}", - rollup_http_server_endpoint - ); - // Send finish request to rollup http server - { - let mut json_status = std::collections::HashMap::new(); - json_status.insert("status", status); - let client = hyper::Client::new(); - // Prepare http request - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(rollup_http_server_endpoint) - .body(hyper::Body::from( - serde_json::to_string(&json_status).expect("status json"), - )) - .expect("finish request"); - - // Send http request targeting target-proxy /finish endpoint - // And parse response with the new advance/inspect request - match client.request(req).await { - Ok(res) => { - if res.status().is_success() { - // Handle Rollup Http Request received in json body - let buf = hyper::body::to_bytes(res) - .await - .expect("error in rollup http server response handling") - .to_vec(); - let finish_response = serde_json::from_slice::< - RollupHttpRequest, - >(&buf) - .expect( - "rollup http server response deserialization failed", - ); - tracing::debug!( - "rollup http request finish response: {:?}", - &finish_response - ); - - match finish_response { - RollupHttpRequest::Advance { - data: advance_request, - } => Ok(RollupRequest::Advance(advance_request)), - RollupHttpRequest::Inspect { - data: inspect_request, - } => Ok(RollupRequest::Inspect(inspect_request)), - } - } else { - // Rollup http server returned error on finish request - // Handle error message received in plain http response - let finish_error = String::from_utf8( - hyper::body::to_bytes(res) - .await - .expect("error in rollup http server finish response handling") - .into_iter() - .collect(), - ) - .expect("failed to decode message"); - - Err(std::io::Error::new(ErrorKind::Other, finish_error)) - } - } - Err(e) => { - tracing::error!( - "Failed to send `{}` response to the server: {}", - status, - e - ); - Err(std::io::Error::new(ErrorKind::Other, e.to_string())) - } - } - } -} diff --git a/offchain/rollups-http-client/src/rollup.rs b/offchain/rollups-http-client/src/rollup.rs deleted file mode 100644 index 51c5c72b2..000000000 --- a/offchain/rollups-http-client/src/rollup.rs +++
/dev/null @@ -1,75 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use serde::{Deserialize, Serialize}; -use std::error::Error; -use std::fmt; - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AdvanceMetadata { - pub msg_sender: String, - pub epoch_index: u64, - pub input_index: u64, - pub block_number: u64, - pub timestamp: u64, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct AdvanceRequest { - pub metadata: AdvanceMetadata, - pub payload: String, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct InspectRequest { - pub payload: String, -} - -#[derive(Debug, PartialEq, Eq)] -pub struct RollupRequestError { - pub cause: String, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Notice { - pub payload: String, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Voucher { - pub destination: String, - pub payload: String, -} - -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] -pub struct Report { - pub payload: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct IndexResponse { - index: u64, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Exception { - pub payload: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub enum RollupRequest { - Inspect(InspectRequest), - Advance(AdvanceRequest), -} - -pub enum RollupResponse { - Finish(bool), -} - -impl Error for RollupRequestError {} - -impl fmt::Display for RollupRequestError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "failed to execute rollup request ({})", self.cause) - } -} diff --git a/offchain/state-server/Cargo.toml b/offchain/state-server/Cargo.toml deleted file mode 100644 index 5fb700f4b..000000000 --- a/offchain/state-server/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "state-server" -edition.workspace = true -license.workspace = true -version.workspace = true - -[[bin]] -name = "cartesi-rollups-state-server" -path = "src/main.rs" - -[dependencies] -log = { path = "../log" } -types = { path = "../types" } - -clap = { workspace = true, features = ["derive", "env"] } -eth-block-history.workspace = true -eth-state-fold-types.workspace = true -eth-state-fold.workspace = true -eth-state-server-lib.workspace = true -serde.workspace = true -snafu.workspace = true -tokio = { workspace = true, features = ["macros", "sync", "rt-multi-thread"] } -tonic.workspace = true -tracing.workspace = true -url.workspace = true diff --git a/offchain/state-server/README.md b/offchain/state-server/README.md deleted file mode 100644 index bd0598c10..000000000 --- a/offchain/state-server/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# State-fold server - -Service based on the [State-fold library](https://github.com/cartesi/state-fold) used to: - -- Detect state changes in the blockchain to generate rollups input events to be processed by the Cartesi Node. 
diff --git a/offchain/state-server/src/config.rs b/offchain/state-server/src/config.rs deleted file mode 100644 index 3101ea8d7..000000000 --- a/offchain/state-server/src/config.rs +++ /dev/null @@ -1,43 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use clap::Parser; -use eth_state_server_lib::config::{ - Result, StateServerConfig, StateServerEnvCLIConfig, -}; -use log::{LogConfig, LogEnvCliConfig}; - -#[derive(Parser)] -#[command(name = "state_server_config")] -#[command(about = "Configuration for state-server")] -pub struct EnvCLIConfig { - #[command(flatten)] - pub state_server_config: StateServerEnvCLIConfig, - - #[command(flatten)] - pub log_config: LogEnvCliConfig, -} - -#[derive(Debug, Clone)] -pub struct Config { - pub state_server_config: StateServerConfig, - pub log_config: LogConfig, -} - -impl Config { - pub fn initialize(env_cli_config: EnvCLIConfig) -> Result<Self> { - let state_server_config = - StateServerConfig::initialize(env_cli_config.state_server_config); - let log_config = LogConfig::initialize(env_cli_config.log_config); - - Ok(Self { - state_server_config: state_server_config?, - log_config, - }) - } - - pub fn initialize_from_args() -> Result<Self> { - let env_cli_config = EnvCLIConfig::parse(); - Self::initialize(env_cli_config) - } -} diff --git a/offchain/state-server/src/error.rs b/offchain/state-server/src/error.rs deleted file mode 100644 index 0dc5b9414..000000000 --- a/offchain/state-server/src/error.rs +++ /dev/null @@ -1,28 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use eth_block_history::BlockArchiveError; -use eth_state_fold_types::ethers::providers::{Http, RetryClient}; -use snafu::Snafu; -use tonic::transport::Error as TonicError; -use url::ParseError; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -#[allow(clippy::enum_variant_names)] -pub enum StateServerError { - #[snafu(display("tonic error"))] - TonicError { source: TonicError }, - - #[snafu(display("parser error"))] - ParserError { source: ParseError }, - - #[snafu(display("block archive error"))] - BlockArchiveError { - source: BlockArchiveError< - eth_state_fold_types::ethers::providers::Provider< - RetryClient<Http>, - >, - >, - }, -} diff --git a/offchain/state-server/src/lib.rs b/offchain/state-server/src/lib.rs deleted file mode 100644 index 9e342e058..000000000 --- a/offchain/state-server/src/lib.rs +++ /dev/null @@ -1,116 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use eth_state_fold::{Foldable, StateFoldEnvironment}; -use eth_state_fold_types::ethers::providers::{ - Http, HttpRateLimitRetryPolicy, Provider, RetryClient, -}; -use eth_state_server_lib::{ - config, - grpc_server::StateServer, - utils::{start_server, wait_for_signal}, -}; -use snafu::ResultExt; -use std::sync::{Arc, Mutex}; -use tokio::sync::oneshot; -use types::UserData; -use url::Url; - -use crate::error::{ - BlockArchiveSnafu, ParserSnafu, StateServerError, TonicSnafu, -}; - -mod error; - -const MAX_RETRIES: u32 = 10; -const INITIAL_BACKOFF: u64 = 1000; - -#[tracing::instrument(level = "trace")] -pub async fn run_server<F: Foldable<UserData = Mutex<UserData>> + 'static>( - config: config::StateServerConfig, -) -> Result<(), StateServerError> -where - <F as Foldable>::InitialState: serde::de::DeserializeOwned, - F: serde::ser::Serialize, -{ - let provider = create_provider(&config)?; - let block_subscriber = - create_block_subscriber(&config,
Arc::clone(&provider)).await?; - let env = create_env( - &config, - Arc::clone(&provider), - Arc::clone(&block_subscriber.block_archive), - )?; - - let server = StateServer::<_, _, F>::new(block_subscriber, env); - - let (shutdown_tx, shutdown_rx) = oneshot::channel(); - - tokio::spawn(async { wait_for_signal(shutdown_tx).await }); - - start_server(&config, server, shutdown_rx) - .await - .context(TonicSnafu) -} - -type ServerProvider = Provider<RetryClient<Http>>; - -fn create_provider( - config: &config::StateServerConfig, -) -> Result<Arc<ServerProvider>, StateServerError> { - let http = Http::new( - Url::parse(&config.block_history.http_endpoint).context(ParserSnafu)?, - ); - - let retry_client = RetryClient::new( - http, - Box::new(HttpRateLimitRetryPolicy), - MAX_RETRIES, - INITIAL_BACKOFF, - ); - - let provider = Provider::new(retry_client); - - Ok(Arc::new(provider)) -} - -fn create_env( - config: &config::StateServerConfig, - provider: Arc<ServerProvider>, - block_archive: Arc<eth_block_history::BlockArchive<ServerProvider>>, -) -> Result< - Arc<StateFoldEnvironment<ServerProvider, Mutex<UserData>>>, - StateServerError, -> { - let env = StateFoldEnvironment::new( - provider, - Some(block_archive), - config.state_fold.safety_margin, - config.state_fold.genesis_block, - config.state_fold.query_limit_error_codes.clone(), - config.state_fold.concurrent_events_fetch, - 10000, - Mutex::new(UserData::default()), - ); - - Ok(Arc::new(env)) -} - -async fn create_block_subscriber( - config: &config::StateServerConfig, - provider: Arc<ServerProvider>, -) -> Result< - Arc<eth_block_history::BlockSubscriber<ServerProvider>>, - StateServerError, -> { - let block_subscriber = eth_block_history::BlockSubscriber::start( - Arc::clone(&provider), - config.block_history.ws_endpoint.to_owned(), - config.block_history.block_timeout, - config.block_history.max_depth, - ) - .await - .context(BlockArchiveSnafu)?; - - Ok(Arc::new(block_subscriber)) -} diff --git a/offchain/state-server/src/main.rs b/offchain/state-server/src/main.rs deleted file mode 100644 index b6afd84af..000000000 --- a/offchain/state-server/src/main.rs +++ /dev/null @@ -1,18 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) -mod config; -use config::Config; -use types::foldables::InputBox; - -#[tokio::main] -async fn main() -> Result<(), Box<dyn std::error::Error>> { - let config: Config = Config::initialize_from_args()?; - - log::configure(&config.log_config); - - log::log_service_start(&config, "State Server"); - - state_server::run_server::<InputBox>(config.state_server_config) - .await - .map_err(|e| e.into()) -} diff --git a/offchain/test-fixtures/Cargo.toml b/offchain/test-fixtures/Cargo.toml deleted file mode 100644 index 245d93bfe..000000000 --- a/offchain/test-fixtures/Cargo.toml +++ /dev/null @@ -1,21 +0,0 @@ -[package] -name = "test-fixtures" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -grpc-interfaces = { path = "../grpc-interfaces" } -rollups-data = { path = "../data" } -rollups-events = { path = "../rollups-events" } - -anyhow.workspace = true -backoff = { workspace = true, features = ["tokio"] } -hyper = { workspace = true, features = ["http1", "runtime", "client"] } -json.workspace = true -tempfile.workspace = true -testcontainers.workspace = true -tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } -tonic.workspace = true -tracing.workspace = true -users.workspace = true diff --git a/offchain/test-fixtures/README.md b/offchain/test-fixtures/README.md deleted file mode 100644 index d44628125..000000000 --- a/offchain/test-fixtures/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Test Fixtures - -Library crate that define test fixtures used by
tests included in the services that comprise the Cartesi Node. diff --git a/offchain/test-fixtures/docker/server_manager_nonroot.Dockerfile b/offchain/test-fixtures/docker/server_manager_nonroot.Dockerfile deleted file mode 100644 index d53233d57..000000000 --- a/offchain/test-fixtures/docker/server_manager_nonroot.Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -# (c) Cartesi and individual authors (see AUTHORS) -# SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -FROM cartesi/server-manager:0.9.1 - -ARG user -ARG group -ARG uid -ARG gid - -USER root - -RUN if ! getent group ${gid}; then \ - groupadd -g ${gid} ${group}; \ - fi - -RUN useradd -u ${uid} -g ${gid} -s /bin/sh -m ${user} - -USER ${uid}:${gid} diff --git a/offchain/test-fixtures/src/data.rs b/offchain/test-fixtures/src/data.rs deleted file mode 100644 index 3e8fab6c4..000000000 --- a/offchain/test-fixtures/src/data.rs +++ /dev/null @@ -1,64 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use rollups_data::run_migrations; -use testcontainers::{clients::Cli, images::postgres::Postgres, Container}; - -pub const POSTGRES_DB: &str = "postgres"; -pub const POSTGRES_USER: &str = "postgres"; -pub const POSTGRES_PASSWORD: &str = "pw"; -pub const POSTGRES_HOST: &str = "localhost"; - -pub struct DataFixture<'d> { - _node: Container<'d, Postgres>, - pub user: String, - pub password: String, - pub hostname: String, - pub port: u16, - pub db: String, - pub endpoint: String, -} - -impl DataFixture<'_> { - #[tracing::instrument(level = "trace", skip_all)] - pub fn setup(docker: &Cli) -> DataFixture<'_> { - tracing::info!("setting up postgres fixture"); - - tracing::trace!("starting postgres docker container"); - - let image = testcontainers::RunnableImage::from( - testcontainers::images::postgres::Postgres::default(), - ) - .with_env_var(("POSTGRES_DB".to_owned(), POSTGRES_DB)) - .with_env_var(("POSTGRES_USER".to_owned(), POSTGRES_USER)) - .with_env_var(("POSTGRES_PASSWORD".to_owned(), POSTGRES_PASSWORD)) - .with_tag("13-alpine"); - - let node = docker.run(image); - let port = node.get_host_port_ipv4(5432); - let pg_endpoint = format!( - "postgres://{}:{}@{}:{}/{}", - POSTGRES_USER, POSTGRES_PASSWORD, POSTGRES_HOST, port, POSTGRES_DB - ); - - run_migrations(&pg_endpoint).unwrap(); - - DataFixture { - _node: node, - user: POSTGRES_USER.to_string(), - password: POSTGRES_PASSWORD.to_string(), - hostname: POSTGRES_HOST.to_string(), - port, - db: POSTGRES_DB.to_string(), - endpoint: pg_endpoint, - } - } - - pub fn port(&self) -> u16 { - self.port - } - - pub fn endpoint(&self) -> &str { - &self.endpoint - } -} diff --git a/offchain/test-fixtures/src/docker_cli.rs b/offchain/test-fixtures/src/docker_cli.rs deleted file mode 100644 index f387baae6..000000000 --- a/offchain/test-fixtures/src/docker_cli.rs +++ /dev/null @@ -1,47 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use std::process::Command; - -pub fn build(dockerfile: &str, tag: &str, build_args: &[(&str, &str)]) { - let build_args: Vec<String> = build_args - .iter() - .map(|(key, value)| format!("--build-arg={}={}", key, value)) - .collect(); - let mut args = vec!["build", "-f", dockerfile, "-t", tag]; - for build_arg in build_args.iter() { - args.push(build_arg); - } - args.push("."); - docker_run(&args); -} - -pub fn create(tag: &str) -> String { - let mut id = docker_run(&["create", tag]); - id.pop().expect("failed to remove new line"); -
String::from_utf8_lossy(&id).to_string() -} - -pub fn cp(from: &str, to: &str) { - docker_run(&["cp", from, to]); -} - -pub fn rm(id: &str) { - docker_run(&["rm", "-v", id]); -} - -#[tracing::instrument(level = "trace", skip_all)] -fn docker_run(args: &[&str]) -> Vec<u8> { - tracing::trace!("running docker command 'docker {}'", args.join(" ")); - let output = Command::new("docker") - .args(args) - .output() - .expect("failed to docker_run docker command"); - assert!( - output.status.success(), - "failed to docker_run command 'docker {}'\n{}", - args.join(" "), - String::from_utf8_lossy(&output.stderr) - ); - output.stdout -} diff --git a/offchain/test-fixtures/src/echo_dapp.rs b/offchain/test-fixtures/src/echo_dapp.rs deleted file mode 100644 index 9e74f8dfc..000000000 --- a/offchain/test-fixtures/src/echo_dapp.rs +++ /dev/null @@ -1,127 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use json::{object, JsonValue}; - -async fn print_response<T: hyper::body::HttpBody>( - response: hyper::Response<T>, - endpoint: &str, -) -> Result<(), Box<dyn std::error::Error>> -where - <T as hyper::body::HttpBody>::Error: 'static, - <T as hyper::body::HttpBody>::Error: std::error::Error, -{ - let response_status = response.status().as_u16(); - let response_body = hyper::body::to_bytes(response).await?; - tracing::info!( - "Received {} status {} body {}", - endpoint, - response_status, - std::str::from_utf8(&response_body)? - ); - Ok(()) -} - -async fn handle_advance( - client: &hyper::Client<hyper::client::HttpConnector>, - server_addr: &str, - request: JsonValue, -) -> Result<&'static str, Box<dyn std::error::Error>> { - tracing::info!("Received advance request data {}", &request); - let payload = request["data"]["payload"] - .as_str() - .ok_or("Missing payload")?; - tracing::info!("Adding notice"); - let notice = object! {"payload" => payload}; - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(format!("{}/notice", server_addr)) - .body(hyper::Body::from(notice.dump()))?; - let response = client.request(req).await?; - print_response(response, "notice").await?; - - let rollup_address = request["data"]["metadata"]["msg_sender"] - .as_str() - .ok_or("Missing msg_sender")?; - tracing::info!("Adding voucher"); - let voucher = object! { "address" => rollup_address, "payload" => payload}; - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(format!("{}/voucher", server_addr)) - .body(hyper::Body::from(voucher.dump()))?; - let response = client.request(req).await?; - print_response(response, "voucher").await?; - - Ok("accept") -} - -async fn handle_inspect( - client: &hyper::Client<hyper::client::HttpConnector>, - server_addr: &str, - request: JsonValue, -) -> Result<&'static str, Box<dyn std::error::Error>> { - tracing::info!("Received inspect request data {}", &request); - let payload = request["data"]["payload"] - .as_str() - .ok_or("Missing payload")?; - tracing::info!("Adding report"); - let report = object!
{"payload" => payload}; - let req = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(format!("{}/report", server_addr)) - .body(hyper::Body::from(report.dump()))?; - let response = client.request(req).await?; - print_response(response, "report").await?; - Ok("accept") -} - -pub struct EchoDAppFixture {} - -impl EchoDAppFixture { - pub async fn start_echo_dapp( - server_addr: String, - ) -> Result<(), Box> { - let client = hyper::Client::new(); - - let mut status = "accept"; - loop { - tracing::info!("Sending finish"); - - let response = object! {"status" => status}; - let request = hyper::Request::builder() - .method(hyper::Method::POST) - .header(hyper::header::CONTENT_TYPE, "application/json") - .uri(format!("{}/finish", &server_addr)) - .body(hyper::Body::from(response.dump()))?; - let response = client.request(request).await?; - tracing::info!("Received finish status {}", response.status()); - - if response.status() == hyper::StatusCode::ACCEPTED { - tracing::info!("No pending rollup request, trying again"); - } else { - let body = hyper::body::to_bytes(response).await?; - let utf = std::str::from_utf8(&body)?; - let req = json::parse(utf)?; - - let request_type = req["request_type"] - .as_str() - .ok_or("request_type is not a string")?; - status = match request_type { - "advance_state" => { - handle_advance(&client, &server_addr[..], req).await? - } - "inspect_state" => { - handle_inspect(&client, &server_addr[..], req).await? - } - &_ => { - tracing::info!("Unknown request type"); - "reject" - } - }; - } - } - } -} diff --git a/offchain/test-fixtures/src/host_server_manager.rs b/offchain/test-fixtures/src/host_server_manager.rs deleted file mode 100644 index 648a24347..000000000 --- a/offchain/test-fixtures/src/host_server_manager.rs +++ /dev/null @@ -1,200 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use anyhow::{anyhow, Context}; -use backoff::{future::retry, ExponentialBackoff, ExponentialBackoffBuilder}; -use grpc_interfaces::cartesi_server_manager::{ - processed_input::ProcessedInputOneOf, - server_manager_client::ServerManagerClient, EpochState, - GetEpochStatusRequest, GetEpochStatusResponse, GetSessionStatusRequest, -}; -use rollups_events::Payload; -use std::time::Duration; -use testcontainers::{ - clients::Cli, core::WaitFor, images::generic::GenericImage, Container, -}; -use tokio::sync::Mutex; -use tonic::transport::Channel; - -const SESSION_ID: &str = "default-session-id"; -const RETRY_MAX_ELAPSED_TIME: u64 = 120; - -macro_rules! 
grpc_call { - ($self: ident, $method: ident, $request: expr) => { - $self - .client - .lock() - .await - .$method($request) - .await - .map(|v| v.into_inner()) - .context("grpc call failed") - }; -} - -pub struct HostServerManagerFixture<'d> { - _node: Container<'d, GenericImage>, - client: Mutex>, - session_id: String, - backoff: ExponentialBackoff, - grpc_endpoint: String, - http_endpoint: String, -} - -impl HostServerManagerFixture<'_> { - #[tracing::instrument(level = "trace", skip_all)] - pub async fn setup(docker: &Cli) -> HostServerManagerFixture<'_> { - tracing::info!("setting up host-server-manager fixture"); - tracing::trace!("starting host-server-manager docker container"); - let image = GenericImage::new("cartesi/host-server-manager", "0.9.0") - .with_wait_for(WaitFor::message_on_stderr( - "starting in Actix runtime", - )) - .with_exposed_port(5001) - .with_exposed_port(5004); - let node = docker.run(image); - let grpc_endpoint = - format!("http://127.0.0.1:{}", node.get_host_port_ipv4(5001)); - let http_endpoint = - format!("http://127.0.0.1:{}", node.get_host_port_ipv4(5004)); - tracing::trace!(grpc_endpoint, "connecting to host-server-manager"); - let client = Mutex::new( - ServerManagerClient::connect(grpc_endpoint.clone()) - .await - .expect("failed to connect to host server manager"), - ); - let backoff = ExponentialBackoffBuilder::new() - .with_max_elapsed_time(Some(Duration::from_secs( - RETRY_MAX_ELAPSED_TIME, - ))) - .build(); - - HostServerManagerFixture { - _node: node, - client, - session_id: SESSION_ID.to_owned(), - backoff, - grpc_endpoint, - http_endpoint, - } - } - - pub fn grpc_endpoint(&self) -> &str { - &self.grpc_endpoint - } - - pub fn http_endpoint(&self) -> &str { - &self.http_endpoint - } - - pub fn session_id(&self) -> &str { - &self.session_id - } - - /// Wait until the session is ready - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_session_ready(&self) { - tracing::trace!("asserting whether session is ready"); - retry(self.backoff.clone(), || async { - let request = GetSessionStatusRequest { - session_id: self.session_id.clone(), - }; - grpc_call!(self, get_session_status, request)?; - Ok(()) - }) - .await - .expect("failed to wait for session"); - } - - /// Wait until there is the required amount of processed inputs - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_epoch_status( - &self, - epoch_index: u64, - expected_processed: usize, - ) -> GetEpochStatusResponse { - tracing::trace!( - epoch_index, - expected_processed, - "asserting epoch status" - ); - retry(self.backoff.clone(), || async { - let request = GetEpochStatusRequest { - session_id: self.session_id.clone(), - epoch_index, - }; - let response = grpc_call!(self, get_epoch_status, request)?; - if response.processed_inputs.len() != expected_processed { - Err(anyhow!( - "processed_inputs_count fail got={} expected={}", - response.processed_inputs.len(), - expected_processed - ))?; - } - Ok(response) - }) - .await - .expect("failed to wait for epoch status") - } - - /// Wait until there is the required amount of processed inputs - /// Then, compare the obtained output payloads with the expected ones - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_epoch_status_payloads( - &self, - epoch_index: u64, - expected_payloads: &[Payload], - ) { - tracing::trace!( - epoch_index, - ?expected_payloads, - "asserting epoch status payloads" - ); - let epoch_status = self - .assert_epoch_status(epoch_index, expected_payloads.len()) 
- .await; - - assert_eq!( - expected_payloads.len(), - epoch_status.processed_inputs.len() - ); - for (processed_input, expected_payload) in - epoch_status.processed_inputs.iter().zip(expected_payloads) - { - let oneof = - processed_input.processed_input_one_of.as_ref().unwrap(); - match oneof { - ProcessedInputOneOf::AcceptedData(accepted_data) => { - assert_eq!(accepted_data.notices.len(), 1); - assert_eq!( - &accepted_data.notices[0].payload, - expected_payload.inner() - ); - } - ProcessedInputOneOf::ExceptionData(_) => { - panic!("unexpected exception data"); - } - } - } - } - - /// Wait until the given epoch is finished. - /// Raises error if the epoch is not finished after the backoff timeout. - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_epoch_finished(&self, epoch_index: u64) { - tracing::trace!(epoch_index, "asserting epoch finished"); - retry(self.backoff.clone(), || async { - let request = GetEpochStatusRequest { - session_id: self.session_id.clone(), - epoch_index, - }; - let response = grpc_call!(self, get_epoch_status, request)?; - if response.state() == EpochState::Active { - Err(anyhow!("epoch {} is not finished", epoch_index))?; - } - Ok(()) - }) - .await - .expect("failed to wait for epoch status") - } -} diff --git a/offchain/test-fixtures/src/lib.rs b/offchain/test-fixtures/src/lib.rs deleted file mode 100644 index bf775307f..000000000 --- a/offchain/test-fixtures/src/lib.rs +++ /dev/null @@ -1,19 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub mod broker; -pub mod data; -pub mod docker_cli; -pub mod echo_dapp; -pub mod host_server_manager; -pub mod machine_snapshots; -pub mod repository; -pub mod server_manager; - -pub use broker::BrokerFixture; -pub use data::DataFixture; -pub use echo_dapp::EchoDAppFixture; -pub use host_server_manager::HostServerManagerFixture; -pub use machine_snapshots::MachineSnapshotsFixture; -pub use repository::RepositoryFixture; -pub use server_manager::ServerManagerFixture; diff --git a/offchain/test-fixtures/src/machine_snapshots.rs b/offchain/test-fixtures/src/machine_snapshots.rs deleted file mode 100644 index 9fd37c49d..000000000 --- a/offchain/test-fixtures/src/machine_snapshots.rs +++ /dev/null @@ -1,36 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use std::path::PathBuf; -use tempfile::TempDir; - -use crate::docker_cli; - -const TAG: &str = "cartesi/rollups-node-snapshot:devel"; -const CONTAINER_SNAPSHOT_DIR: &str = "/usr/share/cartesi/snapshot"; - -pub struct MachineSnapshotsFixture { - dir: TempDir, -} - -impl MachineSnapshotsFixture { - #[tracing::instrument(level = "trace", skip_all)] - pub fn setup() -> Self { - tracing::info!("setting up machine snapshots fixture"); - - let dir = tempfile::tempdir().expect("failed to create temp dir"); - let id = docker_cli::create(TAG); - let from_container = format!("{}:{}", id, CONTAINER_SNAPSHOT_DIR); - docker_cli::cp(&from_container, dir.path().to_str().unwrap()); - docker_cli::rm(&id); - Self { dir } - } - - /// Return the path of directory that contains the snapshot - pub fn path(&self) -> PathBuf { - let snapshot_dir = PathBuf::from(CONTAINER_SNAPSHOT_DIR); - self.dir - .path() - .join(snapshot_dir.file_name().expect("impossible")) - } -} diff --git a/offchain/test-fixtures/src/repository.rs b/offchain/test-fixtures/src/repository.rs deleted file mode 100644 index db6bb87ff..000000000 --- 
a/offchain/test-fixtures/src/repository.rs +++ /dev/null @@ -1,87 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use backoff::ExponentialBackoffBuilder; -use rollups_data::{RedactedUrl, Repository, RepositoryConfig, Url}; -use std::time::Duration; -use testcontainers::clients::Cli; - -use crate::data::DataFixture; - -const REPOSITORY_MAX_ELAPSED_TIME: u64 = 10; - -/// Fixture that creates a database and connects to it using the -/// rollups_data::Repository struct. -pub struct RepositoryFixture<'d> { - data: DataFixture<'d>, - repository: Repository, -} - -impl RepositoryFixture<'_> { - pub fn setup(docker: &Cli) -> RepositoryFixture { - let data = DataFixture::setup(docker); - let config = create_repository_config(data.port()); - let repository = - Repository::new(config).expect("failed to create repository"); - RepositoryFixture { data, repository } - } - - pub fn config(&self) -> RepositoryConfig { - create_repository_config(self.data.port()) - } - - pub fn repository(&self) -> &Repository { - &self.repository - } - - /// Calls f until it returns Ok or an error different from ItemNotFound. - /// This function is async to allow other services to run in background. - pub async fn retry<T, F>(&self, mut f: F) -> T - where - F: FnMut(&Repository) -> Result<T, rollups_data::Error> - + Send - + 'static, - T: Send + 'static, - { - let backoff = ExponentialBackoffBuilder::new() - .with_max_elapsed_time(Some(Duration::from_secs( - REPOSITORY_MAX_ELAPSED_TIME, - ))) - .build(); - let repository = self.repository.clone(); - tokio::task::spawn_blocking(move || { - backoff::retry(backoff, || { - f(&repository).map_err(|e| match &e { - rollups_data::Error::ItemNotFound { item_type } => { - tracing::info!("{} not found", item_type); - backoff::Error::transient(e) - } - _ => backoff::Error::permanent(e), - }) - }) - .expect("failed to get input from DB") - }) - .await - .expect("failed to wait for task") - } -} - -fn create_repository_config(postgres_port: u16) -> RepositoryConfig { - use crate::data::*; - let redacted_endpoint = Some(RedactedUrl::new( - Url::parse(&format!( - "postgres://{}:{}@{}:{}/{}", - POSTGRES_USER, - POSTGRES_PASSWORD, - POSTGRES_HOST, - postgres_port, - POSTGRES_DB, - )) - .expect("failed to generate Postgres endpoint"), - )); - RepositoryConfig { - redacted_endpoint, - connection_pool_size: 1, - backoff: Default::default(), - } -} diff --git a/offchain/test-fixtures/src/server_manager.rs b/offchain/test-fixtures/src/server_manager.rs deleted file mode 100644 index c8bd5ca5d..000000000 --- a/offchain/test-fixtures/src/server_manager.rs +++ /dev/null @@ -1,214 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::docker_cli; -use anyhow::{anyhow, Context}; -use backoff::{future::retry, ExponentialBackoff, ExponentialBackoffBuilder}; -use grpc_interfaces::cartesi_server_manager::{ - processed_input::ProcessedInputOneOf, - server_manager_client::ServerManagerClient, EpochState, - GetEpochStatusRequest, GetEpochStatusResponse, GetSessionStatusRequest, -}; -use rollups_events::Payload; -use std::path::Path; -use std::time::Duration; -use testcontainers::{ - clients::Cli, core::WaitFor, images::generic::GenericImage, Container, -}; -use tokio::sync::Mutex; -use tonic::transport::Channel; - -const DOCKERFILE: &str = - "../test-fixtures/docker/server_manager_nonroot.Dockerfile"; -const DOCKER_TAG: &str = "cartesi/test-server-manager-nonroot"; -const SESSION_ID: &str =
"default-session-id"; -const RETRY_MAX_ELAPSED_TIME: u64 = 120; - -macro_rules! grpc_call { - ($self: ident, $method: ident, $request: expr) => { - $self - .client - .lock() - .await - .$method($request) - .await - .map(|v| v.into_inner()) - .context("grpc call failed") - }; -} - -pub struct ServerManagerFixture<'d> { - _node: Container<'d, GenericImage>, - client: Mutex>, - endpoint: String, - session_id: String, - backoff: ExponentialBackoff, -} - -impl ServerManagerFixture<'_> { - #[tracing::instrument(level = "trace", skip_all)] - pub async fn setup<'d>( - docker: &'d Cli, - snapshot_dir: &Path, - ) -> ServerManagerFixture<'d> { - tracing::info!("setting up server-manager fixture"); - - tracing::trace!("generating {} docker image", DOCKER_TAG); - let user = users::get_current_username().unwrap(); - let uid = users::get_current_uid().to_string(); - let group = users::get_current_groupname().unwrap(); - let gid = users::get_current_gid().to_string(); - let build_args = vec![ - ("user", user.to_str().unwrap()), - ("uid", &uid), - ("group", group.to_str().unwrap()), - ("gid", &gid), - ]; - docker_cli::build(DOCKERFILE, DOCKER_TAG, &build_args); - - tracing::trace!("starting server manager container"); - let snapshot_dir = snapshot_dir.to_str().unwrap(); - let image = GenericImage::new(DOCKER_TAG, "latest") - .with_wait_for(WaitFor::message_on_stderr("manager version is")) - .with_volume(snapshot_dir, snapshot_dir) - .with_exposed_port(5001); - let node = docker.run(image); - let endpoint = - format!("http://127.0.0.1:{}", node.get_host_port_ipv4(5001)); - - tracing::trace!(endpoint, "connecting to server manager"); - let client = Mutex::new( - ServerManagerClient::connect(endpoint.clone()) - .await - .expect("failed to connect to server manager"), - ); - - let backoff = ExponentialBackoffBuilder::new() - .with_max_elapsed_time(Some(Duration::from_secs( - RETRY_MAX_ELAPSED_TIME, - ))) - .build(); - - ServerManagerFixture { - _node: node, - client, - endpoint, - session_id: SESSION_ID.to_owned(), - backoff, - } - } - - pub fn endpoint(&self) -> &str { - &self.endpoint - } - - pub fn session_id(&self) -> &str { - &self.session_id - } - - /// Wait until the session is ready - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_session_ready(&self) { - tracing::trace!("asserting whether session is ready"); - retry(self.backoff.clone(), || async { - let request = GetSessionStatusRequest { - session_id: self.session_id.clone(), - }; - grpc_call!(self, get_session_status, request)?; - Ok(()) - }) - .await - .expect("failed to wait for session"); - } - - /// Wait until there is the required amount of processed inputs - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_epoch_status( - &self, - epoch_index: u64, - expected_processed: usize, - ) -> GetEpochStatusResponse { - tracing::trace!( - epoch_index, - expected_processed, - "asserting epoch status" - ); - retry(self.backoff.clone(), || async { - let request = GetEpochStatusRequest { - session_id: self.session_id.clone(), - epoch_index, - }; - let response = grpc_call!(self, get_epoch_status, request)?; - if response.processed_inputs.len() != expected_processed { - Err(anyhow!( - "processed_inputs_count fail got={} expected={}", - response.processed_inputs.len(), - expected_processed - ))?; - } - Ok(response) - }) - .await - .expect("failed to wait for epoch status") - } - - /// Wait until there is the required amount of processed inputs - /// Then, compare the obtained output payloads with the expected 
ones - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_epoch_status_payloads( - &self, - epoch_index: u64, - expected_payloads: &[Payload], - ) { - tracing::trace!( - epoch_index, - ?expected_payloads, - "asserting epoch status payloads" - ); - let epoch_status = self - .assert_epoch_status(epoch_index, expected_payloads.len()) - .await; - assert_eq!( - expected_payloads.len(), - epoch_status.processed_inputs.len() - ); - for (processed_input, expected_payload) in - epoch_status.processed_inputs.iter().zip(expected_payloads) - { - let oneof = - processed_input.processed_input_one_of.as_ref().unwrap(); - match oneof { - ProcessedInputOneOf::AcceptedData(accepted_data) => { - assert_eq!(accepted_data.notices.len(), 1); - assert_eq!( - &accepted_data.notices[0].payload, - expected_payload.inner() - ); - } - ProcessedInputOneOf::ExceptionData(_) => { - panic!("unexpected exception data"); - } - } - } - } - - /// Wait until the given epoch is finished. - /// Raises error if the epoch is not finished after the backoff timeout. - #[tracing::instrument(level = "trace", skip_all)] - pub async fn assert_epoch_finished(&self, epoch_index: u64) { - tracing::trace!(epoch_index, "asserting epoch finished"); - retry(self.backoff.clone(), || async { - let request = GetEpochStatusRequest { - session_id: self.session_id.clone(), - epoch_index, - }; - let response = grpc_call!(self, get_epoch_status, request)?; - if response.state() == EpochState::Active { - Err(anyhow!("epoch {} is not finished", epoch_index))?; - } - Ok(()) - }) - .await - .expect("failed to wait for epoch status") - } -} diff --git a/offchain/types/Cargo.toml b/offchain/types/Cargo.toml deleted file mode 100644 index b9fa6c141..000000000 --- a/offchain/types/Cargo.toml +++ /dev/null @@ -1,22 +0,0 @@ -[package] -name = "types" -edition.workspace = true -license.workspace = true -version.workspace = true - -[dependencies] -contracts = { path = "../contracts" } -rollups-events = { path = "../rollups-events" } - -anyhow.workspace = true -async-trait.workspace = true -clap = { workspace = true, features = ["derive", "env"] } -eth-state-fold-types = { workspace = true, features = ["ethers"] } -eth-state-fold.workspace = true -im = { workspace = true, features = ["serde"] } -serde = { workspace = true, features = ["rc"] } -serde_json.workspace = true -snafu.workspace = true - -[dev-dependencies] -serde_json.workspace = true diff --git a/offchain/types/README.md b/offchain/types/README.md deleted file mode 100644 index 4a4c179a4..000000000 --- a/offchain/types/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Types - -Library crate that define types used by the Cartesi Node services. 
diff --git a/offchain/types/src/foldables.rs b/offchain/types/src/foldables.rs deleted file mode 100644 index 71d7cb3e1..000000000 --- a/offchain/types/src/foldables.rs +++ /dev/null @@ -1,240 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::{FoldableError, UserData}; - -use eth_state_fold::{ - utils as fold_utils, FoldMiddleware, Foldable, StateFoldEnvironment, - SyncMiddleware, -}; -use eth_state_fold_types::{ - ethers::{ - contract::LogMeta, - prelude::EthEvent, - providers::Middleware, - types::{Address, TxHash}, - }, - Block, -}; - -use anyhow::{ensure, Context}; -use async_trait::async_trait; -use im::{HashMap, Vector}; -use serde::{Deserialize, Serialize}; -use std::sync::{Arc, Mutex}; - -#[derive(Clone, Debug, Eq, Hash, PartialEq, Serialize, Deserialize)] -pub struct InputBoxInitialState { - pub dapp_address: Arc<Address>
, - pub input_box_address: Arc<Address>
, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct Input { - pub sender: Arc<Address>
, - pub payload: Vec<u8>, - pub block_added: Arc<Block>, - pub dapp: Arc<Address>
, - pub tx_hash: Arc<TxHash>, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct DAppInputBox { - pub inputs: Vector<Arc<Input>>, -} - -#[derive(Clone, Debug, Serialize, Deserialize)] -pub struct InputBox { - pub dapp_address: Arc<Address>
, - pub input_box_address: Arc<Address>
, - pub dapp_input_boxes: Arc<HashMap<Arc<Address>, Arc<DAppInputBox>>>, -} - -#[async_trait] -impl Foldable for InputBox { - type InitialState = InputBoxInitialState; - type Error = FoldableError; - type UserData = Mutex<UserData>; - - async fn sync<M: Middleware + 'static>( - initial_state: &Self::InitialState, - _block: &Block, - env: &StateFoldEnvironment<M, Self::UserData>, - access: Arc<SyncMiddleware<M>>, - ) -> Result<Self, Self::Error> { - let dapp_address = Arc::clone(&initial_state.dapp_address); - let input_box_address = Arc::clone(&initial_state.input_box_address); - - Ok(Self { - dapp_input_boxes: updated_inputs( - None, - access, - env, - &input_box_address, - &dapp_address, - None, - ) - .await?, - dapp_address, - input_box_address, - }) - } - - async fn fold<M: Middleware + 'static>( - previous_state: &Self, - block: &Block, // TODO: when new version of state-fold gets released, change this to Arc<Block> - // and save on cloning. - env: &StateFoldEnvironment<M, Self::UserData>, - access: Arc<FoldMiddleware<M>>, - ) -> Result<Self, Self::Error> { - let dapp_address = Arc::clone(&previous_state.dapp_address); - let input_box_address = Arc::clone(&previous_state.input_box_address); - - if !fold_utils::contains_address(&block.logs_bloom, &input_box_address) - || !fold_utils::contains_topic(&block.logs_bloom, &*dapp_address) - || !fold_utils::contains_topic( - &block.logs_bloom, - &contracts::input_box::InputAddedFilter::signature(), - ) - { - return Ok(previous_state.clone()); - } - - Ok(Self { - dapp_input_boxes: updated_inputs( - Some(&previous_state.dapp_input_boxes), - access, - env, - &input_box_address, - &dapp_address, - None, - ) - .await?, - dapp_address, - input_box_address, - }) - } -} - -async fn updated_inputs<M1: Middleware + 'static, M2: Middleware + 'static>( - previous_input_boxes: Option<&HashMap<Arc<Address>, Arc<DAppInputBox>>>, - provider: Arc<M1>, - env: &StateFoldEnvironment<M2, <InputBox as Foldable>::UserData>, - contract_address: &Address, - dapp_address: &Address, - block_opt: Option<Block>, // TODO: Option<Arc<Block>>, -) -> Result<Arc<HashMap<Arc<Address>, Arc<DAppInputBox>>>, FoldableError> { - let mut input_boxes = - previous_input_boxes.cloned().unwrap_or(HashMap::new()); - - let new_inputs = fetch_all_new_inputs( - provider, - env, - contract_address, - dapp_address, - block_opt, - ) - .await?; - - for input in new_inputs { - let dapp = input.dapp.clone(); - let input = Arc::new(input); - - input_boxes - .entry(dapp) - .and_modify(|i| { - let mut new_input_box = (**i).clone(); - new_input_box.inputs.push_back(input.clone()); - *i = Arc::new(new_input_box); - }) - .or_insert_with(|| { - Arc::new(DAppInputBox { - inputs: im::vector![input], - }) - }); - } - - Ok(Arc::new(input_boxes)) -} - -async fn fetch_all_new_inputs< - M1: Middleware + 'static, - M2: Middleware + 'static, ->( - provider: Arc<M1>, - env: &StateFoldEnvironment<M2, <InputBox as Foldable>::UserData>, - contract_address: &Address, - dapp_address: &Address, - block_opt: Option<Block>, // TODO: Option<Arc<Block>>, -) -> Result<Vec<Input>, FoldableError> { - use contracts::input_box::*; - let contract = InputBox::new(*contract_address, Arc::clone(&provider)); - - // Retrieve `InputAdded` events - let input_events = contract - .input_added_filter() - .topic1(*dapp_address) - .query_with_meta() - .await - .context("Error querying for input added events")?; - - let mut inputs = Vec::with_capacity(input_events.len()); - for (event, meta) in input_events { - inputs.push(Input::build_input(env, event, meta, &block_opt).await?); - } - - Ok(inputs) -} - -impl Input { - async fn build_input<M: Middleware + 'static>( - env: &StateFoldEnvironment<M, <InputBox as Foldable>::UserData>, - event: contracts::input_box::InputAddedFilter, - meta: LogMeta, - block_opt: &Option<Block>, // TODO: &Option<Arc<Block>> - ) -> Result<Self, FoldableError> { - let block = - match block_opt { - Some(ref b) => Arc::new(b.clone()), // TODO: remove Arc::new - - None => env.block_with_hash(&meta.block_hash).await.context(
format!("Could not query block `{:?}`", meta.block_hash), - )?, - }; - - meta_consistent_with_block(&meta, &block)?; - - let mut user_data = env - .user_data() - .lock() - .expect("Mutex should never be poisoned"); - - let sender = user_data.get(event.sender); - let dapp = user_data.get(event.dapp); - - Ok(Self { - sender, - payload: event.input.to_vec(), - dapp, - block_added: block, - tx_hash: Arc::new(meta.transaction_hash), - }) - } -} - -fn meta_consistent_with_block( - meta: &LogMeta, - block: &Block, -) -> Result<(), anyhow::Error> { - ensure!( - meta.block_hash == block.hash, - "Sanity check failed: meta and block `block_hash` do not match" - ); - - ensure!( - meta.block_number == block.number, - "Sanity check failed: meta and block `block_number` do not match" - ); - - Ok(()) -} diff --git a/offchain/types/src/lib.rs b/offchain/types/src/lib.rs deleted file mode 100644 index d8b8674de..000000000 --- a/offchain/types/src/lib.rs +++ /dev/null @@ -1,13 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -pub mod blockchain_config; -pub mod error; -pub use error::*; - -pub mod foldables; - -pub mod user_data; -pub mod utils; - -pub use user_data::UserData; diff --git a/offchain/types/src/user_data.rs b/offchain/types/src/user_data.rs deleted file mode 100644 index 9990a27dc..000000000 --- a/offchain/types/src/user_data.rs +++ /dev/null @@ -1,25 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use eth_state_fold_types::ethers::types::Address; - -use std::{collections::HashSet, sync::Arc}; - -#[derive(Debug, Default)] -pub struct UserData { - addresses: HashSet>, -} - -impl UserData { - pub fn get(&mut self, address: Address) -> Arc
{ - // Method `get_or_insert` of HashSet is still unstable - match self.addresses.get(&address) { - Some(s) => Arc::clone(s), - None => { - let s = Arc::new(address); - assert!(self.addresses.insert(s.clone())); - s - } - } - } -} diff --git a/setup_env.sh b/setup_env.sh index e4b1cc720..c08f926c6 100644 --- a/setup_env.sh +++ b/setup_env.sh @@ -1,6 +1,5 @@ export CARTESI_LOG_LEVEL="info" export CARTESI_LOG_PRETTY="true" -export CARTESI_FEATURE_HOST_MODE="false" export CARTESI_FEATURE_DISABLE_CLAIMER="false" export CARTESI_FEATURE_DISABLE_MACHINE_HASH_CHECK="false" export CARTESI_EPOCH_LENGTH="10" @@ -22,7 +21,7 @@ export CARTESI_POSTGRES_ENDPOINT="postgres://postgres:password@localhost:5432/po export CARTESI_HTTP_ADDRESS="0.0.0.0" export CARTESI_HTTP_PORT="10000" -rust_bin_path="$PWD/offchain/target/debug" +rust_bin_path="$PWD/cmd/authority-claimer/target/debug" # Check if the path is already in $PATH if [[ ":$PATH:" != *":$rust_bin_path:"* ]]; then export PATH=$PATH:$rust_bin_path From 6f072cf753ec54ee33ca1e99253a7c5c1ef29c6a Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Tue, 9 Apr 2024 17:49:22 -0300 Subject: [PATCH 02/34] refactor: simplify CI workflows Move the jobs from the assess-code-quality workflows to the build workflow. --- .github/workflows/build.yml | 49 ++++++++++++++++++++++++- .github/workflows/code-quality.yml | 44 ---------------------- .github/workflows/rust-code-quality.yml | 43 ---------------------- 3 files changed, 48 insertions(+), 88 deletions(-) delete mode 100644 .github/workflows/code-quality.yml delete mode 100644 .github/workflows/rust-code-quality.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index c9cc4d3cb..6e149a29e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -19,6 +19,37 @@ permissions: contents: write jobs: + do-basic-checks: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + with: + submodules: recursive + fetch-depth: 0 + ref: ${{ github.event.pull_request.head.sha }} + + - name: Check conventional commit + uses: cocogitto/cocogitto-action@v3 + id: conventional_commit_check + with: + check-latest-tag-only: true + + - name: Check license header + uses: viperproject/check-license-header@v2 + with: + path: ./ + config: .github/license-check/config.json + + - name: Check auto generated files + run: make check-generate + + - name: Lint Markdown docs + uses: DavidAnson/markdownlint-cli2-action@v16 + with: + globs: | + *.md + docs/*.md + test-rust: runs-on: ubuntu-22.04 env: @@ -56,6 +87,9 @@ jobs: key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} restore-keys: ${{ runner.os }}-cargo- + - name: Update Rust + run: rustup update + - name: Install cargo sweep run: cargo install cargo-sweep continue-on-error: true @@ -64,9 +98,22 @@ jobs: run: cargo install cargo-cache continue-on-error: true + - name: Install cargo-machete + run: cargo install cargo-machete + continue-on-error: true + - name: Set sweep timestamp run: cargo sweep -s + - name: Analyze dependencies + run: cargo machete . 
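The `UserData::get` helper removed above (in `offchain/types/src/user_data.rs`) hand-rolls interning because `HashSet::get_or_insert` is still unstable: a lookup, then an insert on miss. A minimal, self-contained sketch of the same pattern, with a hypothetical fixed-size `Address` alias standing in for the ethers address type:

```rust
use std::collections::HashSet;
use std::sync::Arc;

// Hypothetical stand-in for the 20-byte ethers address type.
type Address = [u8; 20];

#[derive(Debug, Default)]
struct AddressInterner {
    addresses: HashSet<Arc<Address>>,
}

impl AddressInterner {
    // Lookup-then-insert, mirroring the deleted `UserData::get`;
    // `HashSet::get_or_insert` is not yet available on stable Rust.
    fn get(&mut self, address: Address) -> Arc<Address> {
        match self.addresses.get(&address) {
            Some(shared) => Arc::clone(shared),
            None => {
                let shared = Arc::new(address);
                // Cannot fail: the lookup above proved the key is absent.
                assert!(self.addresses.insert(shared.clone()));
                shared
            }
        }
    }
}

fn main() {
    let mut interner = AddressInterner::default();
    let a = interner.get([1; 20]);
    let b = interner.get([1; 20]);
    // Equal addresses share a single allocation.
    assert!(Arc::ptr_eq(&a, &b));
}
```

Interning keeps one shared allocation per distinct address, so repeated senders and DApp addresses cost one `Arc` clone instead of a fresh 20-byte copy each time.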
+ + - name: Check code format + run: cargo fmt --all -- --check + + - name: Run linter + run: cargo clippy -- -A clippy::module_inception + - name: Build binaries and tests run: cargo build --all-targets @@ -101,7 +148,6 @@ jobs: workdir: build load: true - - name: Install Go uses: actions/setup-go@v5 with: @@ -118,6 +164,7 @@ jobs: build-docker: runs-on: ubuntu-22.04 needs: + - do-basic-checks - test-rust - test-go steps: diff --git a/.github/workflows/code-quality.yml b/.github/workflows/code-quality.yml deleted file mode 100644 index 44552e9ec..000000000 --- a/.github/workflows/code-quality.yml +++ /dev/null @@ -1,44 +0,0 @@ -name: Assess code quality - -on: push - -jobs: - assess-code-quality: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - with: - submodules: recursive - fetch-depth: 0 - - - name: Check conventional commit - uses: cocogitto/cocogitto-action@v3 - id: conventional_commit_check - with: - check-latest-tag-only: true - - - name: Check license header - uses: viperproject/check-license-header@v2 - with: - path: ./ - config: .github/license-check/config.json - - - uses: actions/setup-go@v5 - with: - go-version-file: "go.mod" - - - name: Run Go Linter - uses: golangci/golangci-lint-action@v6 - with: - version: v1.58.2 - - - name: Lint Markdown docs - uses: DavidAnson/markdownlint-cli2-action@v16 - with: - globs: | - *.md - docs/*.md - - - name: Check auto generated files - run: make check-generate diff --git a/.github/workflows/rust-code-quality.yml b/.github/workflows/rust-code-quality.yml deleted file mode 100644 index cfadfa482..000000000 --- a/.github/workflows/rust-code-quality.yml +++ /dev/null @@ -1,43 +0,0 @@ -# yaml-language-server: $schema=https://json.schemastore.org/github-workflow.json -name: Assess Rust code quality - -on: push - -jobs: - assess-rust-code-quality: - runs-on: ubuntu-latest - defaults: - run: - working-directory: cmd/authority-claimer - - steps: - - uses: actions/checkout@v4 - with: - submodules: recursive - - - uses: actions/cache@v4 - with: - path: | - ~/.cargo/bin/ - ~/.cargo/registry/index/ - ~/.cargo/registry/cache/ - ~/.cargo/git/db/ - cmd/authority-claimer/target/ - key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }} - restore-keys: ${{ runner.os }}-cargo- - - - name: Update rust - run: rustup update - - - name: Install cargo-machete - run: cargo install cargo-machete - continue-on-error: true - - - name: Analyze dependencies - run: cargo machete . 
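Both the new build workflow above and the deleted workflow below invoke clippy with `-A clippy::module_inception`, likely because the claimer keeps a module named after its parent (`src/signer/signer.rs` alongside `src/signer/mod.rs`), which is the layout that lint flags. A minimal reproduction, using hypothetical inline modules for brevity:

```rust
// What `clippy::module_inception` complains about: a module with the
// same name as its containing module. The claimer has this shape on
// disk (src/signer/mod.rs plus src/signer/signer.rs), hence the `-A`.
mod signer {
    pub mod signer {
        // Without the allowance, clippy warns here:
        // "module has the same name as its containing module"
        pub struct ConditionalSigner;
    }

    pub use self::signer::ConditionalSigner;
}

fn main() {
    let _signer = signer::ConditionalSigner;
}
```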
- - - name: Check code format - run: cargo fmt --all -- --check - - - name: Run linter - run: cargo clippy -- -A clippy::module_inception From 6f5024f8f225862e8f07dd1abd5bc0322c4400a4 Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Wed, 10 Apr 2024 11:48:24 -0300 Subject: [PATCH 03/34] chore: remove server-manager --- CHANGELOG.md | 1 + README.md | 2 +- build/Dockerfile | 9 --------- build/docker-bake.hcl | 1 - 4 files changed, 2 insertions(+), 11 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cae163a2e..d38f2705a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Removed - Removed `advance-runner`, `dispatcher`, `graphql-server`, `host-runner`, `indexer`, `inspect-server`, and `state-server` Rust services +- Removed `server-manager` from rollups-node Docker image - Removed support to host mode ## [1.5.0] 2024-07-08 diff --git a/README.md b/README.md index 70a58d4de..0d7acb5b7 100644 --- a/README.md +++ b/README.md @@ -74,7 +74,7 @@ The [configuration](#configuration) section describes how to do so. ### Host Mode The host mode allows the developer to run the application back-end in the host machine instead of the Cartesi Machine. -[NoNodo][nonodo] replaces this feature, which will be deprecated in version 2.0 of the Rollups Node. +This feature was deprecated in the Rollups Node 2.0 version; instead, developers should use [NoNodo][nonodo]. [nonodo]: https://github.com/gligneul/nonodo#nonodo diff --git a/build/Dockerfile b/build/Dockerfile index c91bb7df0..f59bce8ba 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -11,7 +11,6 @@ ARG BASE_IMAGE ARG RUST_VERSION ARG GO_VERSION ARG FOUNDRY_NIGHTLY_VERSION -ARG SERVER_MANAGER_VERSION ARG MACHINE_EMULATOR_VERSION ARG ROOTFS_VERSION ARG LINUX_VERSION @@ -288,11 +287,6 @@ RUN go mod download COPY . . RUN go build -ldflags "-s -w -X 'main.buildVersion=${ROLLUPS_NODE_VERSION}'" ./cmd/cartesi-rollups-node -# STAGE: server-manager -# -# This stage creates an alias for the server-manager image with a specific version. -FROM cartesi/server-manager:${SERVER_MANAGER_VERSION} as server-manager - # STAGE: rollups-node # # This stage copies the binaries from the previous stages. @@ -318,9 +312,6 @@ apt-get install -y --no-install-recommends \ rm -rf /var/lib/apt/lists/* EOF -# Copy server-manager. -COPY --from=server-manager /usr/bin/server-manager /usr/bin - # Copy Rust binaries. # Explicitly copy each binary to avoid adding unnecessary files to the runtime image. 
ARG RUST_BUILD_PATH diff --git a/build/docker-bake.hcl b/build/docker-bake.hcl index bbd1d266f..9eb669ff0 100644 --- a/build/docker-bake.hcl +++ b/build/docker-bake.hcl @@ -21,7 +21,6 @@ target "common" { RUST_VERSION = "1.78.0" GO_VERSION = "1.22.1" FOUNDRY_NIGHTLY_VERSION = "293fad73670b7b59ca901c7f2105bf7a29165a90" - SERVER_MANAGER_VERSION = "0.9.1" MACHINE_EMULATOR_VERSION = "0.16.1" TOOLS_VERSION = "0.14.1" LINUX_VERSION = "0.19.1" From 36e13a84b37f9fb0cafc3bcce62b1eb5c801507b Mon Sep 17 00:00:00 2001 From: Gabriel de Quadros Ligneul Date: Wed, 10 Apr 2024 17:28:20 -0300 Subject: [PATCH 04/34] feat: bump contracts to 2.0-rc.2 --- .github/license-check/config.json | 6 +- CHANGELOG.md | 4 + build/Dockerfile | 13 +- build/compose-devnet.yaml | 9 +- build/compose-sepolia.yaml | 3 +- cmd/authority-claimer/Cargo.lock | 1 - cmd/authority-claimer/Cargo.toml | 1 - cmd/authority-claimer/build.rs | 22 +- cmd/authority-claimer/src/checker.rs | 119 +- cmd/authority-claimer/src/claimer.rs | 9 +- .../src/{config/cli.rs => config.rs} | 151 ++- cmd/authority-claimer/src/config/contracts.rs | 108 -- cmd/authority-claimer/src/config/error.rs | 40 - cmd/authority-claimer/src/config/mod.rs | 63 - cmd/authority-claimer/src/contracts.rs | 3 +- cmd/authority-claimer/src/http_server.rs | 47 +- cmd/authority-claimer/src/lib.rs | 38 +- cmd/authority-claimer/src/main.rs | 2 +- cmd/authority-claimer/src/metrics.rs | 6 +- .../src/rollups_events/broker.rs | 16 +- .../src/rollups_events/common.rs | 3 +- .../src/rollups_events/rollups_claims.rs | 3 +- cmd/authority-claimer/src/sender.rs | 101 +- .../src/signer/aws_signer.rs | 3 +- cmd/authority-claimer/src/signer/signer.rs | 3 +- cmd/authority-claimer/src/test_fixtures.rs | 11 +- .../root/execute/execute.go | 5 +- cmd/cartesi-rollups-cli/root/send/send.go | 2 +- .../root/validate/validate.go | 4 +- cmd/gen-devnet/deployer.go | 28 +- cmd/gen-devnet/main.go | 4 +- cmd/gen-devnet/rollups.go | 6 +- cmd/gen-devnet/types.go | 1 - docs/config.md | 12 +- internal/node/config/config.go | 6 +- internal/node/config/generate/Config.toml | 11 +- internal/node/config/generated.go | 20 +- internal/node/machinehash.go | 4 +- internal/node/machinehash_test.go | 4 +- internal/node/services.go | 5 +- pkg/addresses/addresses.go | 46 +- pkg/contracts/application/application.go | 1036 +++++++++++++++++ pkg/contracts/authority.go | 844 -------------- pkg/contracts/cartesi_dapp.go | 995 ---------------- pkg/contracts/cartesi_dapp_factory.go | 401 ------- pkg/contracts/generate/main.go | 57 +- pkg/contracts/history.go | 634 ---------- pkg/contracts/iconsensus/iconsensus.go | 540 +++++++++ .../{input_box.go => inputbox/inputbox.go} | 111 +- pkg/contracts/inputs/inputs.go | 202 ++++ pkg/contracts/outputs/outputs.go | 223 ++++ pkg/ethutil/ethutil.go | 46 +- pkg/ethutil/ethutil_test.go | 17 +- pkg/readerclient/proof.go | 35 +- rollups-contracts | 2 +- setup_env.sh | 9 +- test/config.go | 8 +- 57 files changed, 2470 insertions(+), 3633 deletions(-) rename cmd/authority-claimer/src/{config/cli.rs => config.rs} (61%) delete mode 100644 cmd/authority-claimer/src/config/contracts.rs delete mode 100644 cmd/authority-claimer/src/config/error.rs delete mode 100644 cmd/authority-claimer/src/config/mod.rs create mode 100644 pkg/contracts/application/application.go delete mode 100644 pkg/contracts/authority.go delete mode 100644 pkg/contracts/cartesi_dapp.go delete mode 100644 pkg/contracts/cartesi_dapp_factory.go delete mode 100644 pkg/contracts/history.go create mode 100644 
pkg/contracts/iconsensus/iconsensus.go rename pkg/contracts/{input_box.go => inputbox/inputbox.go} (71%) create mode 100644 pkg/contracts/inputs/inputs.go create mode 100644 pkg/contracts/outputs/outputs.go diff --git a/.github/license-check/config.json b/.github/license-check/config.json index 92a3bf073..741739a79 100644 --- a/.github/license-check/config.json +++ b/.github/license-check/config.json @@ -2,10 +2,8 @@ { "include": ["**/*.go", "**/*.rs"], "exclude": [ - "offchain/grpc-interfaces/**", - "offchain/target/**", - "offchain/data/src/schema.rs", - "pkg/contracts/*.go", + "cmd/authority-claimer/target/**", + "pkg/contracts/**", "pkg/readerclient/generated.go", "pkg/inspectclient/generated.go", "rollups-contracts/**" diff --git a/CHANGELOG.md b/CHANGELOG.md index d38f2705a..2a6cc1354 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +### Changed + +- Bumped Rollups Contracts to 2.0 + ### Removed - Removed `advance-runner`, `dispatcher`, `graphql-server`, `host-runner`, `indexer`, `inspect-server`, and `state-server` Rust services diff --git a/build/Dockerfile b/build/Dockerfile index f59bce8ba..0c33b1b31 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -153,18 +153,17 @@ apt-get install -y --no-install-recommends \ gnupg EOF -# Install yarn +# Install nodejs ARG DEBIAN_FRONTEND=noninteractive RUN < Result<(), Box> { let tempdir = tempfile::tempdir()?; @@ -19,10 +20,7 @@ fn main() -> Result<(), Box> { download_contracts(&tarball)?; unzip_contracts(&tarball, tempdir.path())?; - let contracts = vec![ - ("consensus/authority", "Authority", "authority.rs"), - ("history", "History", "history.rs"), - ]; + let contracts = vec![("consensus", "IConsensus", "iconsensus.rs")]; for (contract_path, contract_name, bindings_file_name) in contracts { let source_path = path(tempdir.path(), contract_path, contract_name); let output_path: PathBuf = diff --git a/cmd/authority-claimer/src/checker.rs b/cmd/authority-claimer/src/checker.rs index a005e766e..6c4f16106 100644 --- a/cmd/authority-claimer/src/checker.rs +++ b/cmd/authority-claimer/src/checker.rs @@ -1,8 +1,10 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use crate::contracts::history::{Claim, History}; -use crate::rollups_events::{Address, RollupsClaim}; +use crate::{ + contracts::iconsensus::IConsensus, + rollups_events::{Address, Hash, RollupsClaim}, +}; use async_trait::async_trait; use ethers::{ self, @@ -10,11 +12,10 @@ use ethers::{ providers::{ Http, HttpRateLimitRetryPolicy, Middleware, Provider, RetryClient, }, - types::H160, + types::{Address as EthersAddress, H160}, }; use snafu::{ensure, ResultExt, Snafu}; -use std::sync::Arc; -use std::{collections::HashMap, fmt::Debug}; +use std::{collections::HashSet, fmt::Debug, sync::Arc}; use tracing::trace; use url::{ParseError, Url}; @@ -36,11 +37,20 @@ pub trait DuplicateChecker: Debug { // DefaultDuplicateChecker // ------------------------------------------------------------------------------------------------ +#[derive(Debug, Clone, Hash, Eq, PartialEq)] +struct Claim { + application: Address, + first_index: u64, + last_index: u64, + epoch_hash: Hash, +} + #[derive(Debug)] pub struct DefaultDuplicateChecker { provider: Arc>>, - history: History>>, - claims: HashMap>, + iconsensus: IConsensus>>, + from: EthersAddress, + claims: HashSet, confirmations: usize, next_block_to_read: u64, } @@ -66,24 +76,13 @@ pub enum 
DuplicateCheckerError { latest ))] DepthTooHigh { depth: u64, latest: u64 }, - - #[snafu(display( - "Claim mismatch; blockchain expects [{}, ?], but got claim with [{}, {}]", - expected_first_index, - claim_first_index, - claim_last_index - ))] - ClaimMismatch { - expected_first_index: u128, - claim_first_index: u128, - claim_last_index: u128, - }, } impl DefaultDuplicateChecker { pub async fn new( http_endpoint: String, - history_address: Address, + iconsensus: Address, + from: EthersAddress, confirmations: usize, genesis_block: u64, ) -> Result { @@ -95,14 +94,15 @@ impl DefaultDuplicateChecker { INITIAL_BACKOFF, ); let provider = Arc::new(Provider::new(retry_client)); - let history = History::new( - H160(history_address.inner().to_owned()), + let iconsensus = IConsensus::new( + H160(iconsensus.inner().to_owned()), provider.clone(), ); let mut checker = Self { provider, - history, - claims: HashMap::new(), + iconsensus, + from, + claims: HashSet::new(), confirmations, next_block_to_read: genesis_block, }; @@ -120,27 +120,13 @@ impl DuplicateChecker for DefaultDuplicateChecker { rollups_claim: &RollupsClaim, ) -> Result { self.update_claims().await?; - let expected_first_index = self - .claims // HashMap => DappAddress to Vec - .get(&rollups_claim.dapp_address) // Gets a Option> - .and_then(|claims| claims.last()) // Back to only one Option - .map(|claim| claim.last_index + 1) // Maps to a number - .unwrap_or(0); // If None, unwrap to 0 - if rollups_claim.first_index == expected_first_index { - // This claim is the one the blockchain expects, so it is not considered duplicate. - Ok(false) - } else if rollups_claim.last_index < expected_first_index { - // This claim is already on the blockchain. - Ok(true) - } else { - // This claim is not on blockchain, but it isn't the one blockchain expects. - // If this happens, there is a bug on the dispatcher. - Err(DuplicateCheckerError::ClaimMismatch { - expected_first_index, - claim_first_index: rollups_claim.first_index, - claim_last_index: rollups_claim.last_index, - }) - } + let claim = Claim { + application: rollups_claim.dapp_address.clone(), + first_index: rollups_claim.first_index as u64, + last_index: rollups_claim.last_index as u64, + epoch_hash: rollups_claim.epoch_hash.clone(), + }; + Ok(self.claims.contains(&claim)) } } @@ -167,44 +153,33 @@ impl DefaultDuplicateChecker { return Ok(()); } - let new_claims: Vec<(Address, Claim)> = self - .history - .new_claim_to_history_filter() + let claims = self + .iconsensus + .claim_submission_filter() .from_block(self.next_block_to_read) .to_block(latest) + .topic1(self.from) .query() .await - .context(ContractSnafu)? - .into_iter() - .map(|e| (Address::new(e.dapp.into()), e.claim)) - .collect(); + .context(ContractSnafu)?; + trace!( "read new claims {:?} from block {} to {}", - new_claims, + claims, self.next_block_to_read, latest ); - self.append_claims(new_claims); + for claim_submission in claims.into_iter() { + let claim = Claim { + application: Address::new(claim_submission.app_contract.into()), + first_index: claim_submission.input_range.first_index, + last_index: claim_submission.input_range.last_index, + epoch_hash: Hash::new(claim_submission.epoch_hash), + }; + self.claims.insert(claim); + } self.next_block_to_read = latest + 1; - Ok(()) } - - // Appends new claims to the [Address => Vec] hashmap cache. 
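The reworked checker above replaces the per-application `Vec` bookkeeping (the deleted `append_claims` below) with a `HashSet` of `Claim` tuples: every `ClaimSubmission` event read from the chain is cached, and a claim is a duplicate exactly when an identical (application, input range, epoch hash) tuple is already present. A reduced sketch of that containment logic, with byte arrays standing in for the real address and hash types:

```rust
use std::collections::HashSet;

// Simplified stand-ins for the claim tuple cached by the checker.
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
struct Claim {
    application: [u8; 20],
    first_index: u64,
    last_index: u64,
    epoch_hash: [u8; 32],
}

#[derive(Debug, Default)]
struct Checker {
    claims: HashSet<Claim>,
}

impl Checker {
    /// Cache a claim observed in a `ClaimSubmission` event.
    fn record(&mut self, claim: Claim) {
        self.claims.insert(claim);
    }

    /// A claim is a duplicate iff the exact same tuple was already seen.
    fn is_duplicated(&self, claim: &Claim) -> bool {
        self.claims.contains(claim)
    }
}

fn main() {
    let claim = Claim {
        application: [0xAA; 20],
        first_index: 0,
        last_index: 9,
        epoch_hash: [0x11; 32],
    };
    let mut checker = Checker::default();
    assert!(!checker.is_duplicated(&claim));
    checker.record(claim.clone());
    assert!(checker.is_duplicated(&claim));
}
```

Set containment also removes the old failure mode: there is no longer an "expected first index" to mismatch, so the `ClaimMismatch` error variant disappears with it.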
- fn append_claims(&mut self, new_claims: Vec<(Address, Claim)>) { - if new_claims.is_empty() { - return; - } - for (dapp_address, new_claim) in new_claims { - match self.claims.get_mut(&dapp_address) { - Some(old_claims) => { - old_claims.push(new_claim); - } - None => { - self.claims.insert(dapp_address, vec![new_claim]); - } - } - } - } } diff --git a/cmd/authority-claimer/src/claimer.rs b/cmd/authority-claimer/src/claimer.rs index 8e3bbc033..c0dd8e80e 100644 --- a/cmd/authority-claimer/src/claimer.rs +++ b/cmd/authority-claimer/src/claimer.rs @@ -1,15 +1,14 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use async_trait::async_trait; -use snafu::ResultExt; -use std::fmt::Debug; -use tracing::{info, trace}; - use crate::{ checker::DuplicateChecker, listener::BrokerListener, sender::TransactionSender, }; +use async_trait::async_trait; +use snafu::ResultExt; +use std::fmt::Debug; +use tracing::{info, trace}; /// The `Claimer` starts an event loop that waits for claim messages /// from the broker, and then sends the claims to the blockchain. It checks to diff --git a/cmd/authority-claimer/src/config/cli.rs b/cmd/authority-claimer/src/config.rs similarity index 61% rename from cmd/authority-claimer/src/config/cli.rs rename to cmd/authority-claimer/src/config.rs index d22e754de..3b0586c57 100644 --- a/cmd/authority-claimer/src/config/cli.rs +++ b/cmd/authority-claimer/src/config.rs @@ -1,94 +1,141 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use super::contracts::ContractsCLIConfig; -use crate::config::{ - error::{ - AuthorityClaimerConfigError, ContractsSnafu, InvalidRegionSnafu, - MnemonicFileSnafu, TxManagerSnafu, TxSigningConfigError, - TxSigningSnafu, - }, - AuthorityClaimerConfig, ContractsConfig, TxSigningConfig, +use crate::{ + log::{LogConfig, LogEnvCliConfig}, + redacted::Redacted, + rollups_events::{Address, BrokerCLIConfig, BrokerConfig}, }; -use crate::log::{LogConfig, LogEnvCliConfig}; -use crate::redacted::Redacted; -use crate::rollups_events::{BrokerCLIConfig, BrokerConfig}; use clap::{command, Parser}; use eth_tx_manager::{ - config::{TxEnvCLIConfig as TxManagerCLIConfig, TxManagerConfig}, + config::{ + Error as TxManagerConfigError, TxEnvCLIConfig as TxManagerCLIConfig, + TxManagerConfig, + }, Priority, }; -use rusoto_core::Region; -use snafu::ResultExt; +use rusoto_core::{region::ParseRegionError, Region}; +use snafu::{ResultExt, Snafu}; use std::{fs, str::FromStr}; -// ------------------------------------------------------------------------------------------------ -// AuthorityClaimerCLI -// ------------------------------------------------------------------------------------------------ +#[derive(Debug, Snafu)] +#[snafu(visibility(pub(crate)))] +pub enum AuthorityClaimerConfigError { + #[snafu(display("TxManager configuration error"))] + TxManager { source: TxManagerConfigError }, -#[derive(Parser)] -#[command(name = "authority_claimer_config")] -#[command(about = "Configuration for authority-claimer")] -pub(crate) struct AuthorityClaimerCLI { - #[command(flatten)] - pub tx_manager_config: TxManagerCLIConfig, + #[snafu(display("parse IConsensus address error"))] + ParseIConsensusAddress { source: serde_json::Error }, - #[command(flatten)] - pub tx_signing_config: TxSigningCLIConfig, + #[snafu(display("Missing auth configuration"))] + AuthConfigMissing, - #[command(flatten)] - pub broker_config: BrokerCLIConfig, + #[snafu(display("Could not read 
mnemonic file at path `{}`", path,))] + MnemonicFileError { + path: String, + source: std::io::Error, + }, - #[command(flatten)] - pub log_config: LogEnvCliConfig, + #[snafu(display("Missing AWS region"))] + MissingRegion, - #[command(flatten)] - pub contracts_config: ContractsCLIConfig, + #[snafu(display("Invalid AWS region"))] + InvalidRegion { source: ParseRegionError }, +} - /// Genesis block for reading blockchain events - #[arg(long, env, default_value_t = 1)] +#[derive(Debug, Clone)] +pub struct Config { + pub tx_manager_config: TxManagerConfig, + pub tx_signing_config: TxSigningConfig, + pub tx_manager_priority: Priority, + pub broker_config: BrokerConfig, + pub log_config: LogConfig, + pub iconsensus_address: Address, pub genesis_block: u64, + pub http_server_port: u16, } -impl TryFrom for AuthorityClaimerConfig { - type Error = AuthorityClaimerConfigError; +#[derive(Debug, Clone)] +pub enum TxSigningConfig { + PrivateKey { + private_key: Redacted, + }, + + Mnemonic { + mnemonic: Redacted, + account_index: Option, + }, + + Aws { + key_id: String, + region: Region, + }, +} + +impl Config { + pub fn new() -> Result { + let cli_config = AuthorityClaimerCLI::parse(); - fn try_from(cli_config: AuthorityClaimerCLI) -> Result { let tx_manager_config = TxManagerConfig::initialize(cli_config.tx_manager_config) .context(TxManagerSnafu)?; let tx_signing_config = - TxSigningConfig::try_from(cli_config.tx_signing_config) - .context(TxSigningSnafu)?; + TxSigningConfig::try_from(cli_config.tx_signing_config)?; let broker_config = BrokerConfig::from(cli_config.broker_config); let log_config = LogConfig::initialize(cli_config.log_config); - let contracts_config = - ContractsConfig::try_from(cli_config.contracts_config) - .context(ContractsSnafu)?; + let iconsensus_address = + serde_json::from_str(&cli_config.iconsensus_address) + .context(ParseIConsensusAddressSnafu)?; - Ok(AuthorityClaimerConfig { + Ok(Config { tx_manager_config, tx_signing_config, tx_manager_priority: Priority::Normal, broker_config, log_config, - contracts_config, + iconsensus_address, genesis_block: cli_config.genesis_block, + http_server_port: cli_config.http_server_port, }) } } -// ------------------------------------------------------------------------------------------------ -// TxSigningCLIConfig -// ------------------------------------------------------------------------------------------------ +#[derive(Parser)] +#[command(name = "authority_claimer_config")] +#[command(about = "Configuration for authority-claimer")] +struct AuthorityClaimerCLI { + #[command(flatten)] + pub tx_manager_config: TxManagerCLIConfig, + + #[command(flatten)] + pub tx_signing_config: TxSigningCLIConfig, + + #[command(flatten)] + pub broker_config: BrokerCLIConfig, + + #[command(flatten)] + pub log_config: LogEnvCliConfig, + + /// Address of the IConsensus contract + #[arg(long, env)] + pub iconsensus_address: String, + + /// Genesis block for reading blockchain events + #[arg(long, env, default_value_t = 1)] + pub genesis_block: u64, + + /// Port of the authority-claimer HTTP server + #[arg(long, env, default_value_t = 8080)] + pub http_server_port: u16, +} #[derive(Debug, Parser)] #[command(name = "tx_signing_config")] -pub(crate) struct TxSigningCLIConfig { +struct TxSigningCLIConfig { /// Signer private key, overrides `tx_signing_private_key_file`, `tx_signing_mnemonic` , `tx_signing_mnemonic_file` and `tx_signing_aws_kms_*` #[arg(long, env)] tx_signing_private_key: Option, @@ -119,7 +166,7 @@ pub(crate) struct TxSigningCLIConfig { } impl 
TryFrom for TxSigningConfig { - type Error = TxSigningConfigError; + type Error = AuthorityClaimerConfigError; fn try_from(cli: TxSigningCLIConfig) -> Result { let account_index = cli.tx_signing_mnemonic_account_index; @@ -152,8 +199,12 @@ impl TryFrom for TxSigningConfig { } else { match (cli.tx_signing_aws_kms_key_id, cli.tx_signing_aws_kms_region) { - (None, _) => Err(TxSigningConfigError::AuthConfigMissing), - (Some(_), None) => Err(TxSigningConfigError::MissingRegion), + (None, _) => { + Err(AuthorityClaimerConfigError::AuthConfigMissing) + } + (Some(_), None) => { + Err(AuthorityClaimerConfigError::MissingRegion) + } (Some(key_id), Some(region)) => { let region = Region::from_str(®ion) .context(InvalidRegionSnafu)?; diff --git a/cmd/authority-claimer/src/config/contracts.rs b/cmd/authority-claimer/src/config/contracts.rs deleted file mode 100644 index 16506f9b9..000000000 --- a/cmd/authority-claimer/src/config/contracts.rs +++ /dev/null @@ -1,108 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use crate::rollups_events::Address; -use crate::types::blockchain_config::RollupsDeployment; -use clap::Parser; -use serde::de::DeserializeOwned; -use snafu::{ResultExt, Snafu}; -use std::{fs::File, io::BufReader, path::PathBuf}; - -#[derive(Clone, Debug)] -pub struct ContractsConfig { - pub history_address: Address, - pub authority_address: Address, -} - -#[derive(Debug, Parser)] -#[command(name = "blockchain_config")] -pub struct ContractsCLIConfig { - /// History contract address - #[arg(long, env)] - pub history_address: Option, - - /// Authority contract address - #[arg(long, env)] - pub authority_address: Option, - - /// Path to file with deployment json of the rollups - #[arg(long, env)] - pub rollups_deployment_file: Option, -} - -impl TryFrom for ContractsConfig { - type Error = ContractsConfigError; - - fn try_from(cli: ContractsCLIConfig) -> Result { - // try to get the values from the environment values - let mut history_address = cli - .history_address - .map(deserialize::
) - .transpose()?; - let mut authority_address = cli - .authority_address - .map(deserialize::
) - .transpose()?; - - // read file and replace values if they are not set - if let Some(file) = cli - .rollups_deployment_file - .map(read::) - .transpose()? - { - history_address = history_address - .or(file.contracts.history.and_then(|c| c.address)); - authority_address = authority_address - .or(file.contracts.authority.and_then(|c| c.address)); - } - - Ok(ContractsConfig { - history_address: history_address - .ok_or(ContractsConfigError::MissingHistoryContractConfig)?, - authority_address: authority_address - .ok_or(ContractsConfigError::MissingAuthorityContractConfig)?, - }) - } -} - -#[derive(Debug, Snafu)] -pub enum ContractsConfigError { - #[snafu(display("Json deserialize error"))] - JsonDeserializeError { source: serde_json::Error }, - - #[snafu(display("Json read error ({})", path.display()))] - JsonReadError { - path: PathBuf, - source: serde_json::Error, - }, - - #[snafu(display("Read file error ({})", path.display()))] - ReadFileError { - path: PathBuf, - source: std::io::Error, - }, - - #[snafu(display("Missing History contract configuration"))] - MissingHistoryContractConfig, - - #[snafu(display("Missing Authority contract configuration"))] - MissingAuthorityContractConfig, -} - -// ------------------------------------------------------------------------------------------------ -// Auxiliary -// ------------------------------------------------------------------------------------------------ - -fn read(path: PathBuf) -> Result { - let file = - File::open(&path).context(ReadFileSnafu { path: path.clone() })?; - let reader = BufReader::new(file); - serde_json::from_reader(reader).context(JsonReadSnafu { path }) -} - -fn deserialize( - s: String, -) -> Result { - serde_json::from_value(serde_json::Value::String(s)) - .context(JsonDeserializeSnafu) -} diff --git a/cmd/authority-claimer/src/config/error.rs b/cmd/authority-claimer/src/config/error.rs deleted file mode 100644 index fa961980e..000000000 --- a/cmd/authority-claimer/src/config/error.rs +++ /dev/null @@ -1,40 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -use eth_tx_manager::config::Error as TxManagerConfigError; -use rusoto_core::region::ParseRegionError; -use snafu::Snafu; - -use super::ContractsConfigError; - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -pub enum AuthorityClaimerConfigError { - #[snafu(display("TxManager configuration error"))] - TxManager { source: TxManagerConfigError }, - - #[snafu(display("TxSigning configuration error"))] - TxSigning { source: TxSigningConfigError }, - - #[snafu(display("Contracts configuration error"))] - Contracts { source: ContractsConfigError }, -} - -#[derive(Debug, Snafu)] -#[snafu(visibility(pub(crate)))] -pub enum TxSigningConfigError { - #[snafu(display("Missing auth configuration"))] - AuthConfigMissing, - - #[snafu(display("Could not read mnemonic file at path `{}`", path,))] - MnemonicFileError { - path: String, - source: std::io::Error, - }, - - #[snafu(display("Missing AWS region"))] - MissingRegion, - - #[snafu(display("Invalid AWS region"))] - InvalidRegion { source: ParseRegionError }, -} diff --git a/cmd/authority-claimer/src/config/mod.rs b/cmd/authority-claimer/src/config/mod.rs deleted file mode 100644 index 9fffb4fac..000000000 --- a/cmd/authority-claimer/src/config/mod.rs +++ /dev/null @@ -1,63 +0,0 @@ -// (c) Cartesi and individual authors (see AUTHORS) -// SPDX-License-Identifier: Apache-2.0 (see LICENSE) - -mod cli; -mod contracts; -mod error; - -pub use 
contracts::{ContractsConfig, ContractsConfigError}; -pub use error::AuthorityClaimerConfigError; - -use crate::http_server::HttpServerConfig; -use crate::log::LogConfig; -use crate::redacted::Redacted; -use crate::rollups_events::BrokerConfig; -use cli::AuthorityClaimerCLI; -use eth_tx_manager::{config::TxManagerConfig, Priority}; -use rusoto_core::Region; - -#[derive(Debug, Clone)] -pub struct Config { - pub authority_claimer_config: AuthorityClaimerConfig, - pub http_server_config: HttpServerConfig, -} - -#[derive(Debug, Clone)] -pub struct AuthorityClaimerConfig { - pub tx_manager_config: TxManagerConfig, - pub tx_signing_config: TxSigningConfig, - pub tx_manager_priority: Priority, - pub broker_config: BrokerConfig, - pub log_config: LogConfig, - pub contracts_config: ContractsConfig, - pub genesis_block: u64, -} - -#[derive(Debug, Clone)] -pub enum TxSigningConfig { - PrivateKey { - private_key: Redacted, - }, - - Mnemonic { - mnemonic: Redacted, - account_index: Option, - }, - - Aws { - key_id: String, - region: Region, - }, -} - -impl Config { - pub fn new() -> Result { - let (http_server_config, authority_claimer_cli) = - HttpServerConfig::parse::("authority_claimer"); - let authority_claimer_config = authority_claimer_cli.try_into()?; - Ok(Self { - authority_claimer_config, - http_server_config, - }) - } -} diff --git a/cmd/authority-claimer/src/contracts.rs b/cmd/authority-claimer/src/contracts.rs index bdc43df33..9869dbcb9 100644 --- a/cmd/authority-claimer/src/contracts.rs +++ b/cmd/authority-claimer/src/contracts.rs @@ -15,5 +15,4 @@ macro_rules! contract { }; } -contract!(authority); -contract!(history); +contract!(iconsensus); diff --git a/cmd/authority-claimer/src/http_server.rs b/cmd/authority-claimer/src/http_server.rs index 1c8fb97d6..727ee948e 100644 --- a/cmd/authority-claimer/src/http_server.rs +++ b/cmd/authority-claimer/src/http_server.rs @@ -10,49 +10,6 @@ pub use prometheus_client::metrics::counter::Counter as CounterRef; pub use prometheus_client::metrics::family::Family as FamilyRef; // End of metrics to re-export. -use clap::{ - value_parser, Arg, Command, CommandFactory, FromArgMatches, Parser, -}; - -#[derive(Debug, Clone, Parser)] -pub struct HttpServerConfig { - pub(crate) port: u16, -} - -impl HttpServerConfig { - /// Returns the HTTP server config and the app's config after parsing - /// it from the command line and/or environment variables. - /// - /// The parameter `service` must be a lowercase string that - /// uses underlines as spaces. - /// - /// The parametric type `C` must be a struct that derives `Parser`. - pub fn parse( - service: &'static str, - ) -> (HttpServerConfig, C) { - let command = ::command(); - let command = add_port_arg(command, service); - - let matches = command.get_matches(); - let http_server_config: HttpServerConfig = - FromArgMatches::from_arg_matches(&matches).unwrap(); - let inner_config: C = - FromArgMatches::from_arg_matches(&matches).unwrap(); - (http_server_config, inner_config) - } -} - -fn add_port_arg(command: Command, service: S) -> Command { - let service = service.to_string().to_uppercase(); - command.arg( - Arg::new("port") - .long("http-server-port") - .env(format!("{}_HTTP_SERVER_PORT", service)) - .value_parser(value_parser!(u16)) - .default_value("8080"), - ) -} - use axum::{routing::get, Router}; use prometheus_client::encoding::text::encode; use std::{ @@ -64,11 +21,11 @@ use std::{ /// /// The `Registry` parameter is a `prometheus` type used for metric tracking. 
pub async fn start( - config: HttpServerConfig, + port: u16, registry: Registry, ) -> Result<(), std::io::Error> { let ip = "0.0.0.0".parse().expect("could not parse host address"); - let addr = SocketAddr::new(ip, config.port); + let addr = SocketAddr::new(ip, port); tracing::info!("Starting HTTP server at {}", addr); let registry = Arc::new(Mutex::new(registry)); diff --git a/cmd/authority-claimer/src/lib.rs b/cmd/authority-claimer/src/lib.rs index 7120f5629..9132f0f7d 100644 --- a/cmd/authority-claimer/src/lib.rs +++ b/cmd/authority-claimer/src/lib.rs @@ -18,14 +18,14 @@ mod types; #[cfg(test)] mod test_fixtures; -use crate::{ - checker::DefaultDuplicateChecker, - claimer::{Claimer, DefaultClaimer}, - listener::DefaultBrokerListener, - metrics::AuthorityClaimerMetrics, - sender::DefaultTransactionSender, -}; +use checker::DefaultDuplicateChecker; +use claimer::{Claimer, DefaultClaimer}; pub use config::Config; +use ethers::signers::Signer; +use listener::DefaultBrokerListener; +use metrics::AuthorityClaimerMetrics; +use sender::DefaultTransactionSender; +use signer::ConditionalSigner; use snafu::Error; use tracing::trace; @@ -33,9 +33,8 @@ pub async fn run(config: Config) -> Result<(), Box> { // Creating the metrics and health server. let metrics = AuthorityClaimerMetrics::new(); let http_server_handle = - http_server::start(config.http_server_config, metrics.clone().into()); + http_server::start(config.http_server_port, metrics.clone().into()); - let config = config.authority_claimer_config; let chain_id = config.tx_manager_config.chain_id; // Creating the broker listener. @@ -44,11 +43,17 @@ pub async fn run(config: Config) -> Result<(), Box> { DefaultBrokerListener::new(config.broker_config.clone(), chain_id) .await?; + // Creating the conditional signer. + let conditional_signer = + ConditionalSigner::new(chain_id, &config.tx_signing_config).await?; + let from = conditional_signer.address(); + // Creating the duplicate checker. trace!("Creating the duplicate checker"); let duplicate_checker = DefaultDuplicateChecker::new( config.tx_manager_config.provider_http_endpoint.clone(), - config.contracts_config.history_address.clone(), + config.iconsensus_address.clone(), + from, config.tx_manager_config.default_confirmations, config.genesis_block, ) @@ -56,9 +61,16 @@ pub async fn run(config: Config) -> Result<(), Box> { // Creating the transaction sender. trace!("Creating the transaction sender"); - let transaction_sender = - DefaultTransactionSender::new(config.clone(), chain_id, metrics) - .await?; + let transaction_sender = DefaultTransactionSender::new( + config.tx_manager_config, + config.tx_manager_priority, + conditional_signer, + config.iconsensus_address, + from, + chain_id, + metrics, + ) + .await?; // Creating the claimer loop. let claimer = DefaultClaimer::new( diff --git a/cmd/authority-claimer/src/main.rs b/cmd/authority-claimer/src/main.rs index 145e8551e..c628a776c 100644 --- a/cmd/authority-claimer/src/main.rs +++ b/cmd/authority-claimer/src/main.rs @@ -10,7 +10,7 @@ async fn main() -> Result<(), Box> { let config: Config = Config::new().map_err(Box::new)?; // Setting up the logging environment. 
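In the reshuffled `run` above, the `ConditionalSigner` is now built once, its address taken as `from`, and that single address threaded into both the duplicate checker (which filters `ClaimSubmission` events by submitter via `topic1`) and the transaction sender. A sketch of that wiring, with the components reduced to hypothetical stubs:

```rust
// Hypothetical stubs standing in for the components wired in `run`.
#[derive(Clone, Copy, Debug, PartialEq)]
struct EthAddress([u8; 20]);

struct ConditionalSigner {
    address: EthAddress,
}

impl ConditionalSigner {
    fn address(&self) -> EthAddress {
        self.address
    }
}

struct DuplicateChecker {
    // Only claims submitted by `from` are considered when deduplicating.
    from: EthAddress,
}

struct TransactionSender {
    // Transactions are sent from the same account the checker watches.
    from: EthAddress,
}

fn wire(signer: ConditionalSigner) -> (DuplicateChecker, TransactionSender) {
    // The signer is created once and its address shared, instead of the
    // sender constructing its own signer as before.
    let from = signer.address();
    (DuplicateChecker { from }, TransactionSender { from })
}

fn main() {
    let signer = ConditionalSigner { address: EthAddress([7; 20]) };
    let (checker, sender) = wire(signer);
    assert_eq!(checker.from, sender.from);
}
```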
- log::configure(&config.authority_claimer_config.log_config); + log::configure(&config.log_config); //Log Service info log::log_service_start(&config, "Authority Claimer"); diff --git a/cmd/authority-claimer/src/metrics.rs b/cmd/authority-claimer/src/metrics.rs index 7108c88e5..1167b894f 100644 --- a/cmd/authority-claimer/src/metrics.rs +++ b/cmd/authority-claimer/src/metrics.rs @@ -1,8 +1,10 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use crate::http_server::{CounterRef, FamilyRef, Registry}; -use crate::rollups_events::DAppMetadata; +use crate::{ + http_server::{CounterRef, FamilyRef, Registry}, + rollups_events::DAppMetadata, +}; const METRICS_PREFIX: &str = "cartesi_rollups_authority_claimer"; diff --git a/cmd/authority-claimer/src/rollups_events/broker.rs b/cmd/authority-claimer/src/rollups_events/broker.rs index 9df69c5ed..51f31bd17 100644 --- a/cmd/authority-claimer/src/rollups_events/broker.rs +++ b/cmd/authority-claimer/src/rollups_events/broker.rs @@ -1,23 +1,19 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use crate::redacted::{RedactedUrl, Url}; use backoff::{future::retry, ExponentialBackoff, ExponentialBackoffBuilder}; use clap::Parser; -use redis::aio::{ConnectionLike, ConnectionManager}; -use redis::cluster::ClusterClient; -use redis::cluster_async::ClusterConnection; -use redis::streams::{ - StreamId, StreamRangeReply, StreamReadOptions, StreamReadReply, -}; use redis::{ + aio::{ConnectionLike, ConnectionManager}, + cluster::ClusterClient, + cluster_async::ClusterConnection, + streams::{StreamId, StreamRangeReply, StreamReadOptions, StreamReadReply}, AsyncCommands, Client, Cmd, Pipeline, RedisError, RedisFuture, Value, }; use serde::{de::DeserializeOwned, Serialize}; use snafu::{ResultExt, Snafu}; -use std::fmt; -use std::time::Duration; - -use crate::redacted::{RedactedUrl, Url}; +use std::{fmt, time::Duration}; pub const INITIAL_ID: &str = "0"; diff --git a/cmd/authority-claimer/src/rollups_events/common.rs b/cmd/authority-claimer/src/rollups_events/common.rs index 5fbb73fa0..33c9e8fa4 100644 --- a/cmd/authority-claimer/src/rollups_events/common.rs +++ b/cmd/authority-claimer/src/rollups_events/common.rs @@ -2,8 +2,7 @@ // SPDX-License-Identifier: Apache-2.0 (see LICENSE) use base64::{engine::general_purpose::STANDARD as base64_engine, Engine as _}; -use prometheus_client::encoding::EncodeLabelValue; -use prometheus_client::encoding::LabelValueEncoder; +use prometheus_client::encoding::{EncodeLabelValue, LabelValueEncoder}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; use std::fmt::Write; diff --git a/cmd/authority-claimer/src/rollups_events/rollups_claims.rs b/cmd/authority-claimer/src/rollups_events/rollups_claims.rs index 7c5abc6ae..2d216ed61 100644 --- a/cmd/authority-claimer/src/rollups_events/rollups_claims.rs +++ b/cmd/authority-claimer/src/rollups_events/rollups_claims.rs @@ -1,9 +1,8 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use serde::{Deserialize, Serialize}; - use super::{Address, BrokerStream, Hash}; +use serde::{Deserialize, Serialize}; #[derive(Debug)] pub struct RollupsClaimsStream { diff --git a/cmd/authority-claimer/src/sender.rs b/cmd/authority-claimer/src/sender.rs index 634ecb0bf..0edc16f42 100644 --- a/cmd/authority-claimer/src/sender.rs +++ b/cmd/authority-claimer/src/sender.rs @@ -1,10 +1,15 @@ // (c) Cartesi and individual authors (see 
AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use crate::contracts::{authority::Authority, history::Claim}; -use crate::rollups_events::{DAppMetadata, RollupsClaim}; +use crate::{ + contracts::iconsensus::{IConsensus, InputRange}, + metrics::AuthorityClaimerMetrics, + rollups_events::{Address, DAppMetadata, RollupsClaim}, + signer::{ConditionalSigner, ConditionalSignerError}, +}; use async_trait::async_trait; use eth_tx_manager::{ + config::TxManagerConfig, database::FileSystemDatabase as Database, gas_oracle::DefaultGasOracle as GasOracle, manager::Configuration, @@ -12,29 +17,19 @@ use eth_tx_manager::{ transaction::{Priority, Transaction, Value}, Chain, }; -use ethabi::Token; use ethers::{ self, - abi::AbiEncode, middleware::SignerMiddleware, providers::{ Http, HttpRateLimitRetryPolicy, MockProvider, Provider, RetryClient, }, - signers::Signer, - types::{Bytes, NameOrAddress, H160}, + types::{NameOrAddress, H160}, }; use snafu::{OptionExt, ResultExt, Snafu}; -use std::fmt::Debug; -use std::sync::Arc; +use std::{fmt::Debug, sync::Arc}; use tracing::{info, trace}; use url::{ParseError, Url}; -use crate::{ - config::AuthorityClaimerConfig, - metrics::AuthorityClaimerMetrics, - signer::{ConditionalSigner, ConditionalSignerError}, -}; - /// The `TransactionSender` sends claims to the blockchain. /// /// It should wait for N blockchain confirmations. @@ -78,31 +73,13 @@ macro_rules! tx_manager { }; } -struct SubmittableClaim(ethers::types::Address, RollupsClaim); - -impl From for Bytes { - fn from(submittable_claim: SubmittableClaim) -> Self { - let SubmittableClaim(dapp_address, claim) = submittable_claim; - let claim = Claim { - epoch_hash: claim.epoch_hash.into_inner(), - first_index: claim.first_index, - last_index: claim.last_index, - }; - ethers::abi::encode(&[ - Token::Address(dapp_address), - Token::FixedBytes(claim.encode()), - ]) - .into() - } -} - #[derive(Debug)] pub struct DefaultTransactionSender { tx_manager: TransactionManager, confirmations: usize, priority: Priority, from: ethers::types::Address, - authority: Authority>, + iconsensus: IConsensus>, chain_id: u64, metrics: AuthorityClaimerMetrics, } @@ -149,13 +126,12 @@ fn create_middleware( /// Creates the tx-manager instance. /// NOTE: tries to re-instantiate the tx-manager only once. async fn create_tx_manager( - conditional_signer: &ConditionalSigner, + conditional_signer: ConditionalSigner, provider_url: String, database_path: String, chain: Chain, ) -> Result { - let middleware = - create_middleware(conditional_signer.clone(), provider_url)?; + let middleware = create_middleware(conditional_signer, provider_url)?; let result = tx_manager!(new, middleware, database_path, chain); let tx_manager = if let Err(TrasactionManagerError::NonceTooLow { .. 
}) = result { @@ -173,42 +149,37 @@ async fn create_tx_manager( impl DefaultTransactionSender { pub async fn new( - config: AuthorityClaimerConfig, + tx_manager_config: TxManagerConfig, + tx_manager_priority: Priority, + conditional_signer: ConditionalSigner, + iconsensus: Address, + from: ethers::types::Address, chain_id: u64, metrics: AuthorityClaimerMetrics, ) -> Result { - let chain: Chain = (&config.tx_manager_config).into(); - - let conditional_signer = - ConditionalSigner::new(chain.id, &config.tx_signing_config) - .await - .context(SignerSnafu)?; + let chain: Chain = (&tx_manager_config).into(); let tx_manager = create_tx_manager( - &conditional_signer, - config.tx_manager_config.provider_http_endpoint.clone(), - config.tx_manager_config.database_path.clone(), + conditional_signer, + tx_manager_config.provider_http_endpoint.clone(), + tx_manager_config.database_path.clone(), chain, ) .await?; - let authority = { + let iconsensus = { let (provider, _mock) = Provider::mocked(); let provider = Arc::new(provider); - let address: H160 = config - .contracts_config - .authority_address - .into_inner() - .into(); - Authority::new(address, provider) + let address: H160 = iconsensus.into_inner().into(); + IConsensus::new(address, provider) }; Ok(Self { tx_manager, - confirmations: config.tx_manager_config.default_confirmations, - priority: config.tx_manager_priority, - from: conditional_signer.address(), - authority, + confirmations: tx_manager_config.default_confirmations, + priority: tx_manager_priority, + iconsensus, + from, chain_id, metrics, }) @@ -226,13 +197,17 @@ impl TransactionSender for DefaultTransactionSender { let dapp_address = rollups_claim.dapp_address.clone(); let transaction = { - let submittable_claim = SubmittableClaim( - H160(dapp_address.inner().to_owned()), - rollups_claim, - ); + let input_range = InputRange { + first_index: rollups_claim.first_index as u64, + last_index: rollups_claim.last_index as u64, + }; let call = self - .authority - .submit_claim(submittable_claim.into()) + .iconsensus + .submit_claim( + H160(dapp_address.inner().to_owned()), + input_range, + rollups_claim.epoch_hash.into_inner(), + ) .from(self.from); let to = match call.tx.to().context(InternalEthersSnafu)? { NameOrAddress::Address(a) => *a, diff --git a/cmd/authority-claimer/src/signer/aws_signer.rs b/cmd/authority-claimer/src/signer/aws_signer.rs index 29476ee45..49982a538 100644 --- a/cmd/authority-claimer/src/signer/aws_signer.rs +++ b/cmd/authority-claimer/src/signer/aws_signer.rs @@ -1,6 +1,7 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use super::aws_credentials::AwsCredentialsProvider; use async_trait::async_trait; use ethers::{ signers::{AwsSigner as InnerAwsSigner, AwsSignerError, Signer}, @@ -12,8 +13,6 @@ use ethers::{ use rusoto_core::{HttpClient, Region}; use rusoto_kms::KmsClient; -use super::aws_credentials::AwsCredentialsProvider; - /// The `AwsSigner` (re)implements the `Signer` trait for the `InnerAwsSigner`. 
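As the `ConditionalSigner` docs in `signer.rs` below put it, the type implements conditional dispatch by hand: an enum over the concrete signer variants, with each method matching on the variant, instead of a `Box<dyn Signer>` trait object. A minimal sketch of that pattern under hypothetical signer types:

```rust
// Hypothetical concrete signers; real ones produce ECDSA signatures.
struct LocalWallet;
struct AwsKms;

impl LocalWallet {
    fn sign(&self, msg: &[u8]) -> Vec<u8> {
        msg.to_vec() // placeholder "signature"
    }
}

impl AwsKms {
    fn sign(&self, msg: &[u8]) -> Vec<u8> {
        msg.iter().rev().copied().collect() // placeholder "signature"
    }
}

// Conditional dispatch: one enum, one match per method, no trait object.
enum ConditionalSigner {
    LocalWallet(LocalWallet),
    Aws(AwsKms),
}

impl ConditionalSigner {
    fn sign(&self, msg: &[u8]) -> Vec<u8> {
        match self {
            ConditionalSigner::LocalWallet(w) => w.sign(msg),
            ConditionalSigner::Aws(s) => s.sign(msg),
        }
    }
}

fn main() {
    let local = ConditionalSigner::LocalWallet(LocalWallet);
    let aws = ConditionalSigner::Aws(AwsKms);
    assert_eq!(local.sign(b"claim"), b"claim".to_vec());
    assert_eq!(aws.sign(b"ab"), b"ba".to_vec());
}
```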
/// /// We do not use an `InnerAwsSigner` directly because of lifetime and diff --git a/cmd/authority-claimer/src/signer/signer.rs b/cmd/authority-claimer/src/signer/signer.rs index 14fd25ce1..9a1cb5587 100644 --- a/cmd/authority-claimer/src/signer/signer.rs +++ b/cmd/authority-claimer/src/signer/signer.rs @@ -1,6 +1,7 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) +use crate::{config::TxSigningConfig, signer::aws_signer::AwsSigner}; use async_trait::async_trait; use ethers::{ signers::{ @@ -14,8 +15,6 @@ use ethers::{ }; use snafu::{ResultExt, Snafu}; -use crate::{config::TxSigningConfig, signer::aws_signer::AwsSigner}; - /// The `ConditionalSigner` is implementing conditional dispatch (instead of /// dynamic dispatch) by hand for objects that implement the `Sender` trait. /// diff --git a/cmd/authority-claimer/src/test_fixtures.rs b/cmd/authority-claimer/src/test_fixtures.rs index 38662cc54..2e7dc0f69 100644 --- a/cmd/authority-claimer/src/test_fixtures.rs +++ b/cmd/authority-claimer/src/test_fixtures.rs @@ -1,10 +1,13 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -use crate::redacted::{RedactedUrl, Url}; -use crate::rollups_events::{ - broker::BrokerEndpoint, common::ADDRESS_SIZE, Address, Broker, - BrokerConfig, DAppMetadata, RollupsClaim, RollupsClaimsStream, INITIAL_ID, +use crate::{ + redacted::{RedactedUrl, Url}, + rollups_events::{ + broker::BrokerEndpoint, common::ADDRESS_SIZE, Address, Broker, + BrokerConfig, DAppMetadata, RollupsClaim, RollupsClaimsStream, + INITIAL_ID, + }, }; use backoff::ExponentialBackoff; use testcontainers::{ diff --git a/cmd/cartesi-rollups-cli/root/execute/execute.go b/cmd/cartesi-rollups-cli/root/execute/execute.go index c5505ec61..8e4d816c0 100644 --- a/cmd/cartesi-rollups-cli/root/execute/execute.go +++ b/cmd/cartesi-rollups-cli/root/execute/execute.go @@ -94,15 +94,14 @@ func run(cmd *cobra.Command, args []string) { slog.Info("Executing voucher", "voucher-index", voucherIndex, "input-index", inputIndex, - "application-address", book.CartesiDApp, + "application-address", book.Application, ) - txHash, err := ethutil.ExecuteVoucher( + txHash, err := ethutil.ExecuteOutput( ctx, client, book, signer, resp.Payload, - &resp.Destination, proof, ) cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/send/send.go b/cmd/cartesi-rollups-cli/root/send/send.go index d44554eb2..da62b3c87 100644 --- a/cmd/cartesi-rollups-cli/root/send/send.go +++ b/cmd/cartesi-rollups-cli/root/send/send.go @@ -70,7 +70,7 @@ func run(cmd *cobra.Command, args []string) { book = addresses.GetTestBook() } - slog.Info("Sending input", "application-address", book.CartesiDApp) + slog.Info("Sending input", "application-address", book.Application) inputIndex, err := ethutil.AddInput(ctx, client, book, signer, payload) cobra.CheckErr(err) diff --git a/cmd/cartesi-rollups-cli/root/validate/validate.go b/cmd/cartesi-rollups-cli/root/validate/validate.go index fcca2fc79..03684d9ef 100644 --- a/cmd/cartesi-rollups-cli/root/validate/validate.go +++ b/cmd/cartesi-rollups-cli/root/validate/validate.go @@ -83,9 +83,9 @@ func run(cmd *cobra.Command, args []string) { slog.Info("Validating notice", "notice-index", noticeIndex, "input-index", inputIndex, - "application-address", book.CartesiDApp, + "application-address", book.Application, ) - err = ethutil.ValidateNotice(ctx, client, book, resp.Payload, proof) + err = ethutil.ValidateOutput(ctx, client, book, resp.Payload, 
proof) cobra.CheckErr(err) slog.Info("Notice validated") diff --git a/cmd/gen-devnet/deployer.go b/cmd/gen-devnet/deployer.go index 25ef0fc22..855280f64 100644 --- a/cmd/gen-devnet/deployer.go +++ b/cmd/gen-devnet/deployer.go @@ -11,7 +11,6 @@ import ( "github.com/cartesi/rollups-node/pkg/addresses" "github.com/cartesi/rollups-node/pkg/ethutil" - "github.com/ethereum/go-ethereum/common" ) const ( @@ -41,32 +40,41 @@ func deploy(ctx context.Context, // Create a Rollups Application by calling the necessary factories func createApplication(ctx context.Context, hash string) (DeploymentInfo, error) { var depInfo DeploymentInfo + addressBook := addresses.GetTestBook() - // Create the Authority/History pair + // Create the Authority contract contractAddresses, err := createContracts(ctx, - common.Address.Hex(addresses.GetTestBook().AuthorityHistoryPairFactory), - "newAuthorityHistoryPair(address,bytes32)(address,address)", + addressBook.AuthorityFactory.Hex(), + "newAuthority(address,bytes32)(address)", CONTRACT_OWNER_ADDRESS, SALT) if err != nil { - return DeploymentInfo{}, fmt.Errorf("could not create authority/history pair: %v", err) + return DeploymentInfo{}, fmt.Errorf("could not create authority: %v", err) } - depInfo.AuthorityAddress = contractAddresses[0] - depInfo.HistoryAddress = contractAddresses[1] + + portalAddresses := []string{ + addressBook.ERC1155BatchPortal.Hex(), + addressBook.ERC1155SinglePortal.Hex(), + addressBook.ERC20Portal.Hex(), + addressBook.ERC721Portal.Hex(), + addressBook.EtherPortal.Hex(), + } + supportedPortals := "[" + strings.Join(portalAddresses, ",") + "]" // Create the Application, passing the address of the newly created Authority contractAddresses, err = createContracts(ctx, - common.Address.Hex(addresses.GetTestBook().CartesiDAppFactory), - "newApplication(address,address,bytes32,bytes32)(address)", + addressBook.ApplicationFactory.Hex(), + "newApplication(address,address,address[],address,bytes32,bytes32)(address)", depInfo.AuthorityAddress, + addressBook.InputBox.Hex(), + supportedPortals, CONTRACT_OWNER_ADDRESS, hash, SALT) if err != nil { return DeploymentInfo{}, fmt.Errorf("could not create application: %v", err) } - depInfo.ApplicationAddress = contractAddresses[0] return depInfo, nil diff --git a/cmd/gen-devnet/main.go b/cmd/gen-devnet/main.go index 936a03bef..8c80bbb90 100644 --- a/cmd/gen-devnet/main.go +++ b/cmd/gen-devnet/main.go @@ -43,11 +43,11 @@ var ( ) func init() { - // Default path based on submodule location for rollups-contracts 1.2 + // Default path based on submodule location for rollups-contracts 2.0 Cmd.Flags().StringVarP(&rollupsContractsPath, "rollups-contracts-hardhat-path", "r", - "rollups-contracts/onchain/rollups", + "rollups-contracts", "path for the hardhat project used to deploy rollups-contracts") Cmd.Flags().StringVarP(&hashFile, diff --git a/cmd/gen-devnet/rollups.go b/cmd/gen-devnet/rollups.go index 889bbd915..aa34298f6 100644 --- a/cmd/gen-devnet/rollups.go +++ b/cmd/gen-devnet/rollups.go @@ -53,15 +53,15 @@ func copyContracts(ctx context.Context, srcDir string, destDir string) error { // Deploy rollups-contracts by using its own deployment script func deployContracts(ctx context.Context, execDir string) error { - cmdDir := execDir + "/rollups" + cmdDir := execDir + "/rollups-contracts" - cmd := exec.CommandContext(ctx, "yarn", "install") + cmd := exec.CommandContext(ctx, "pnpm", "install") cmd.Dir = cmdDir if err := cmd.Run(); err != nil { return fmt.Errorf("command '%v' failed with %v", cmd.String(), err) } - cmd = 
exec.CommandContext(ctx, "yarn", "deploy:development") + cmd = exec.CommandContext(ctx, "pnpm", "deploy:development") cmd.Env = os.Environ() cmd.Env = append(cmd.Env, "RPC_URL="+RPC_URL) cmd.Dir = cmdDir diff --git a/cmd/gen-devnet/types.go b/cmd/gen-devnet/types.go index 954a05882..4d637615f 100644 --- a/cmd/gen-devnet/types.go +++ b/cmd/gen-devnet/types.go @@ -5,6 +5,5 @@ package main type DeploymentInfo struct { AuthorityAddress string `json:"CARTESI_CONTRACTS_AUTHORITY_ADDRESS"` - HistoryAddress string `json:"CARTESI_CONTRACTS_HISTORY_ADDRESS"` ApplicationAddress string `json:"CARTESI_CONTRACTS_APPLICATION_ADDRESS"` } diff --git a/docs/config.md b/docs/config.md index a37d5e231..8abdf0592 100644 --- a/docs/config.md +++ b/docs/config.md @@ -78,7 +78,7 @@ Block subscription timeout in seconds. ## `CARTESI_BLOCKCHAIN_FINALITY_OFFSET` -The node assumes that blocks offseted by N from the current block have reached finality +The node assumes that blocks offsetted by N from the current block have reached finality (N is the read depth). * **Type:** `int` @@ -116,15 +116,9 @@ Address of the DApp's contract. * **Type:** `string` -## `CARTESI_CONTRACTS_AUTHORITY_ADDRESS` +## `CARTESI_CONTRACTS_ICONSENSUS_ADDRESS` -Address of the Authority contract. - -* **Type:** `string` - -## `CARTESI_CONTRACTS_HISTORY_ADDRESS` - -Address of the History contract. +Address of the IConsensus contract. * **Type:** `string` diff --git a/internal/node/config/config.go b/internal/node/config/config.go index 13a5d8b21..5d224ef5b 100644 --- a/internal/node/config/config.go +++ b/internal/node/config/config.go @@ -23,8 +23,7 @@ type NodeConfig struct { BlockchainFinalityOffset int BlockchainBlockTimeout int ContractsApplicationAddress string - ContractsHistoryAddress string - ContractsAuthorityAddress string + ContractsIConsensusAddress string ContractsInputBoxAddress string ContractsInputBoxDeploymentBlockNumber int64 SnapshotDir string @@ -81,8 +80,7 @@ func FromEnv() NodeConfig { config.BlockchainFinalityOffset = getBlockchainFinalityOffset() config.BlockchainBlockTimeout = getBlockchainBlockTimeout() config.ContractsApplicationAddress = getContractsApplicationAddress() - config.ContractsHistoryAddress = getContractsHistoryAddress() - config.ContractsAuthorityAddress = getContractsAuthorityAddress() + config.ContractsIConsensusAddress = getContractsIconsensusAddress() config.ContractsInputBoxAddress = getContractsInputBoxAddress() config.ContractsInputBoxDeploymentBlockNumber = getContractsInputBoxDeploymentBlockNumber() config.SnapshotDir = getSnapshotDir() diff --git a/internal/node/config/generate/Config.toml b/internal/node/config/generate/Config.toml index a5fbc716a..e310c8f54 100644 --- a/internal/node/config/generate/Config.toml +++ b/internal/node/config/generate/Config.toml @@ -74,7 +74,7 @@ If set to true the node will send transactions using the legacy gas fee model default = "10" go-type = "int" description = """ -The node assumes that blocks offseted by N from the current block have reached finality +The node assumes that blocks offsetted by N from the current block have reached finality (N is the read depth).""" [blockchain.CARTESI_BLOCKCHAIN_BLOCK_TIMEOUT] @@ -92,15 +92,10 @@ go-type = "string" description = """ Address of the DApp's contract.""" -[contracts.CARTESI_CONTRACTS_HISTORY_ADDRESS] +[contracts.CARTESI_CONTRACTS_ICONSENSUS_ADDRESS] go-type = "string" description = """ -Address of the History contract.""" - -[contracts.CARTESI_CONTRACTS_AUTHORITY_ADDRESS] -go-type = "string" -description = 
""" -Address of the Authority contract.""" +Address of the IConsensus contract.""" [contracts.CARTESI_CONTRACTS_INPUT_BOX_ADDRESS] go-type = "string" diff --git a/internal/node/config/generated.go b/internal/node/config/generated.go index ab4955025..2f92f219e 100644 --- a/internal/node/config/generated.go +++ b/internal/node/config/generated.go @@ -281,26 +281,14 @@ func getContractsApplicationAddress() string { return val } -func getContractsAuthorityAddress() string { - s, ok := os.LookupEnv("CARTESI_CONTRACTS_AUTHORITY_ADDRESS") +func getContractsIconsensusAddress() string { + s, ok := os.LookupEnv("CARTESI_CONTRACTS_ICONSENSUS_ADDRESS") if !ok { - panic("missing env var CARTESI_CONTRACTS_AUTHORITY_ADDRESS") + panic("missing env var CARTESI_CONTRACTS_ICONSENSUS_ADDRESS") } val, err := toString(s) if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_CONTRACTS_AUTHORITY_ADDRESS: %v", err)) - } - return val -} - -func getContractsHistoryAddress() string { - s, ok := os.LookupEnv("CARTESI_CONTRACTS_HISTORY_ADDRESS") - if !ok { - panic("missing env var CARTESI_CONTRACTS_HISTORY_ADDRESS") - } - val, err := toString(s) - if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_CONTRACTS_HISTORY_ADDRESS: %v", err)) + panic(fmt.Sprintf("failed to parse CARTESI_CONTRACTS_ICONSENSUS_ADDRESS: %v", err)) } return val } diff --git a/internal/node/machinehash.go b/internal/node/machinehash.go index 3d7d05418..3f487ddc3 100644 --- a/internal/node/machinehash.go +++ b/internal/node/machinehash.go @@ -9,7 +9,7 @@ import ( "os" "path" - "github.com/cartesi/rollups-node/pkg/contracts" + "github.com/cartesi/rollups-node/pkg/contracts/application" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" @@ -69,7 +69,7 @@ func getTemplateHash( if err != nil { return "", fmt.Errorf("get template hash: %w", err) } - cartesiApplication, err := contracts.NewCartesiDAppCaller( + cartesiApplication, err := application.NewApplicationCaller( common.HexToAddress(applicationAddress), client, ) diff --git a/internal/node/machinehash_test.go b/internal/node/machinehash_test.go index 2c690deb9..ff2d13cf3 100644 --- a/internal/node/machinehash_test.go +++ b/internal/node/machinehash_test.go @@ -64,7 +64,7 @@ func (s *ValidateMachineHashSuite) TestItFailsWhenContextIsCanceled() { err = validateMachineHash( ctx, machineDir, - addresses.GetTestBook().CartesiDApp.String(), + addresses.GetTestBook().Application.String(), blockchainHttpEndpoint, ) s.NotNil(err) @@ -91,7 +91,7 @@ func (s *ValidateMachineHashSuite) TestItSucceedsWhenHashesAreEqual() { err = validateMachineHash( ctx, machineDir, - addresses.GetTestBook().CartesiDApp.String(), + addresses.GetTestBook().Application.String(), blockchainHttpEndpoint, ) s.Nil(err) diff --git a/internal/node/services.go b/internal/node/services.go index dcf0a9793..b769f508a 100644 --- a/internal/node/services.go +++ b/internal/node/services.go @@ -69,12 +69,11 @@ func newAuthorityClaimer(c config.NodeConfig, workDir string) services.CommandSe s.Env = append(s.Env, fmt.Sprintf("TX_DEFAULT_CONFIRMATIONS=%v", c.BlockchainFinalityOffset)) s.Env = append(s.Env, fmt.Sprintf("REDIS_ENDPOINT=%v", getRedisEndpoint(c))) - s.Env = append(s.Env, fmt.Sprintf("HISTORY_ADDRESS=%v", c.ContractsHistoryAddress)) - s.Env = append(s.Env, fmt.Sprintf("AUTHORITY_ADDRESS=%v", c.ContractsAuthorityAddress)) + s.Env = append(s.Env, fmt.Sprintf("ICONSENSUS_ADDRESS=%v", c.ContractsIConsensusAddress)) s.Env = append(s.Env, 
fmt.Sprintf("INPUT_BOX_ADDRESS=%v", c.ContractsInputBoxAddress)) s.Env = append(s.Env, fmt.Sprintf("GENESIS_BLOCK=%v", c.ContractsInputBoxDeploymentBlockNumber)) - s.Env = append(s.Env, fmt.Sprintf("AUTHORITY_CLAIMER_HTTP_SERVER_PORT=%v", + s.Env = append(s.Env, fmt.Sprintf("HTTP_SERVER_PORT=%v", getPort(c, portOffsetAuthorityClaimer))) switch auth := c.Auth.(type) { case config.AuthPrivateKey: diff --git a/pkg/addresses/addresses.go b/pkg/addresses/addresses.go index 8989e65b6..8a4b710a2 100644 --- a/pkg/addresses/addresses.go +++ b/pkg/addresses/addresses.go @@ -19,36 +19,32 @@ import ( // List of contract addresses. type Book struct { - AuthorityHistoryPairFactory common.Address - CartesiDAppFactory common.Address - DAppAddressRelay common.Address - ERC1155BatchPortal common.Address - ERC1155SinglePortal common.Address - ERC20Portal common.Address - ERC721Portal common.Address - EtherPortal common.Address - InputBox common.Address - CartesiDApp common.Address - HistoryAddress common.Address - AuthorityAddress common.Address + Application common.Address + ApplicationFactory common.Address + Authority common.Address + AuthorityFactory common.Address + DAppAddressRelay common.Address + ERC1155BatchPortal common.Address + ERC1155SinglePortal common.Address + ERC20Portal common.Address + ERC721Portal common.Address + EtherPortal common.Address + InputBox common.Address } // Get the addresses for the test environment. func GetTestBook() *Book { return &Book{ - AuthorityHistoryPairFactory: common. - HexToAddress("0x3890A047Cf9Af60731E80B2105362BbDCD70142D"), - CartesiDAppFactory: common.HexToAddress("0x7122cd1221C20892234186facfE8615e6743Ab02"), - DAppAddressRelay: common.HexToAddress("0xF5DE34d6BbC0446E2a45719E718efEbaaE179daE"), - ERC1155BatchPortal: common.HexToAddress("0xedB53860A6B52bbb7561Ad596416ee9965B055Aa"), - ERC1155SinglePortal: common.HexToAddress("0x7CFB0193Ca87eB6e48056885E026552c3A941FC4"), - ERC20Portal: common.HexToAddress("0x9C21AEb2093C32DDbC53eEF24B873BDCd1aDa1DB"), - ERC721Portal: common.HexToAddress("0x237F8DD094C0e47f4236f12b4Fa01d6Dae89fb87"), - EtherPortal: common.HexToAddress("0xFfdbe43d4c855BF7e0f105c400A50857f53AB044"), - InputBox: common.HexToAddress("0x59b22D57D4f067708AB0c00552767405926dc768"), - CartesiDApp: common.HexToAddress("0x7C54E3f7A8070a54223469965A871fB8f6f88c22"), - HistoryAddress: common.HexToAddress("0x325272217ae6815b494bF38cED004c5Eb8a7CdA7"), - AuthorityAddress: common.HexToAddress("0x58c93F83fb3304730C95aad2E360cdb88b782010"), + Application: common.HexToAddress("0xb72c832dDeA10326143831F1E5F1646920C9c990"), + ApplicationFactory: common.HexToAddress("0x39cc8d1faB70F713784032f166aB7Fe3B4801144"), + Authority: common.HexToAddress("0x77e5a5fb18F72b5106621f66C704c006c6dB4578"), + AuthorityFactory: common.HexToAddress("0x5EF4260c72a7A8df752AFF49aC46Ba741754E04a"), + ERC1155BatchPortal: common.HexToAddress("0x83D7fc8A2A2535A17b037598bad23562215a752A"), + ERC1155SinglePortal: common.HexToAddress("0x77b5b758f43E789E0858a766934bE08B2CD65feA"), + ERC20Portal: common.HexToAddress("0x8f4b3F53699EDd5374c3374b4Ee1CcA3d23E95Ab"), + ERC721Portal: common.HexToAddress("0xDF9d6F65E9a053FbaFF9eAaf0b522f1b35Dfd05B"), + EtherPortal: common.HexToAddress("0xF03FB966604bF02073b87b4586b3edBC201f73A6"), + InputBox: common.HexToAddress("0xA1b8EB1F13d8D5Db976a653BbDF8972cfD14691C"), } } diff --git a/pkg/contracts/application/application.go b/pkg/contracts/application/application.go new file mode 100644 index 000000000..709538064 --- /dev/null +++ 
b/pkg/contracts/application/application.go @@ -0,0 +1,1036 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package application + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// InputRange is an auto generated low-level Go binding around an user-defined struct. +type InputRange struct { + FirstIndex uint64 + LastIndex uint64 +} + +// OutputValidityProof is an auto generated low-level Go binding around an user-defined struct. +type OutputValidityProof struct { + InputRange InputRange + InputIndexWithinEpoch uint64 + OutputIndexWithinInput uint64 + OutputHashesRootHash [32]byte + OutputsEpochRootHash [32]byte + MachineStateHash [32]byte + OutputHashInOutputHashesSiblings [][32]byte + OutputHashesInEpochSiblings [][32]byte +} + +// ApplicationMetaData contains all meta data concerning the Application contract. +var ApplicationMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"consensus\",\"type\":\"address\"},{\"internalType\":\"contractIInputBox\",\"name\":\"inputBox\",\"type\":\"address\"},{\"internalType\":\"contractIPortal[]\",\"name\":\"portals\",\"type\":\"address[]\"},{\"internalType\":\"address\",\"name\":\"initialOwner\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"templateHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"IncorrectEpochHash\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectOutputHashesRootHash\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectOutputsEpochRootHash\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"inputIndex\",\"type\":\"uint256\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"firstIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"lastIndex\",\"type\":\"uint64\"}],\"internalType\":\"structInputRange\",\"name\":\"inputRange\",\"type\":\"tuple\"}],\"name\":\"InputIndexOutOfRange\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"output\",\"type\":\"bytes\"}],\"name\":\"OutputNotExecutable\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"output\",\"type\":\"bytes\"}],\"name\":\"OutputNotReexecutable\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"owner\",\"type\":\"address\"}],\"name\":\"OwnableInvalidOwner\",\"type\":\"error\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"account\",\"type\":\"address\"}],\"name\":\"OwnableUnauthorizedAccount\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"ReentrancyGuardReentrantCall\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractIConsensus\",\"name\":\"newConsensus\",\"type\":\"address\"}],\"name\":\"NewConsensus\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint64\",\"name\":\"inputIndex\",\"type\":\"uint64\"},{\"indexe
d\":false,\"internalType\":\"uint64\",\"name\":\"outputIndexWithinInput\",\"type\":\"uint64\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"output\",\"type\":\"bytes\"}],\"name\":\"OutputExecuted\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"output\",\"type\":\"bytes\"},{\"components\":[{\"components\":[{\"internalType\":\"uint64\",\"name\":\"firstIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"lastIndex\",\"type\":\"uint64\"}],\"internalType\":\"structInputRange\",\"name\":\"inputRange\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"inputIndexWithinEpoch\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"outputIndexWithinInput\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"outputHashesRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"outputsEpochRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"machineStateHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashInOutputHashesSiblings\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashesInEpochSiblings\",\"type\":\"bytes32[]\"}],\"internalType\":\"structOutputValidityProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"executeOutput\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConsensus\",\"outputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getInputBox\",\"outputs\":[{\"internalType\":\"contractIInputBox\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getPortals\",\"outputs\":[{\"internalType\":\"contractIPortal[]\",\"name\":\"\",\"type\":\"address[]\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTemplateHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"newConsensus\",\"type\":\"address\"}],\"name\":\"migrateToConsensus\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"onERC1155BatchReceived\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"bytes4\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"onERC1155Received\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"by
tes4\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"onERC721Received\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"bytes4\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"output\",\"type\":\"bytes\"},{\"components\":[{\"components\":[{\"internalType\":\"uint64\",\"name\":\"firstIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"lastIndex\",\"type\":\"uint64\"}],\"internalType\":\"structInputRange\",\"name\":\"inputRange\",\"type\":\"tuple\"},{\"internalType\":\"uint64\",\"name\":\"inputIndexWithinEpoch\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"outputIndexWithinInput\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"outputHashesRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"outputsEpochRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"machineStateHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashInOutputHashesSiblings\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashesInEpochSiblings\",\"type\":\"bytes32[]\"}],\"internalType\":\"structOutputValidityProof\",\"name\":\"proof\",\"type\":\"tuple\"}],\"name\":\"validateOutput\",\"outputs\":[],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"inputIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"outputIndexWithinInput\",\"type\":\"uint256\"}],\"name\":\"wasOutputExecuted\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", +} + +// ApplicationABI is the input ABI used to generate the binding from. +// Deprecated: Use ApplicationMetaData.ABI instead. +var ApplicationABI = ApplicationMetaData.ABI + +// Application is an auto generated Go binding around an Ethereum contract. +type Application struct { + ApplicationCaller // Read-only binding to the contract + ApplicationTransactor // Write-only binding to the contract + ApplicationFilterer // Log filterer for contract events +} + +// ApplicationCaller is an auto generated read-only Go binding around an Ethereum contract. +type ApplicationCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ApplicationTransactor is an auto generated write-only Go binding around an Ethereum contract. 
+type ApplicationTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ApplicationFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type ApplicationFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// ApplicationSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type ApplicationSession struct { + Contract *Application // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ApplicationCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type ApplicationCallerSession struct { + Contract *ApplicationCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// ApplicationTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type ApplicationTransactorSession struct { + Contract *ApplicationTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// ApplicationRaw is an auto generated low-level Go binding around an Ethereum contract. +type ApplicationRaw struct { + Contract *Application // Generic contract binding to access the raw methods on +} + +// ApplicationCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type ApplicationCallerRaw struct { + Contract *ApplicationCaller // Generic read-only contract binding to access the raw methods on +} + +// ApplicationTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type ApplicationTransactorRaw struct { + Contract *ApplicationTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewApplication creates a new instance of Application, bound to a specific deployed contract. +func NewApplication(address common.Address, backend bind.ContractBackend) (*Application, error) { + contract, err := bindApplication(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Application{ApplicationCaller: ApplicationCaller{contract: contract}, ApplicationTransactor: ApplicationTransactor{contract: contract}, ApplicationFilterer: ApplicationFilterer{contract: contract}}, nil +} + +// NewApplicationCaller creates a new read-only instance of Application, bound to a specific deployed contract. +func NewApplicationCaller(address common.Address, caller bind.ContractCaller) (*ApplicationCaller, error) { + contract, err := bindApplication(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &ApplicationCaller{contract: contract}, nil +} + +// NewApplicationTransactor creates a new write-only instance of Application, bound to a specific deployed contract. 
+func NewApplicationTransactor(address common.Address, transactor bind.ContractTransactor) (*ApplicationTransactor, error) { + contract, err := bindApplication(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &ApplicationTransactor{contract: contract}, nil +} + +// NewApplicationFilterer creates a new log filterer instance of Application, bound to a specific deployed contract. +func NewApplicationFilterer(address common.Address, filterer bind.ContractFilterer) (*ApplicationFilterer, error) { + contract, err := bindApplication(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &ApplicationFilterer{contract: contract}, nil +} + +// bindApplication binds a generic wrapper to an already deployed contract. +func bindApplication(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := ApplicationMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Application *ApplicationRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Application.Contract.ApplicationCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Application *ApplicationRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Application.Contract.ApplicationTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Application *ApplicationRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Application.Contract.ApplicationTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Application *ApplicationCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Application.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Application *ApplicationTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Application.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Application *ApplicationTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Application.Contract.contract.Transact(opts, method, params...) +} + +// GetConsensus is a free data retrieval call binding the contract method 0x179e740b. 
+// +// Solidity: function getConsensus() view returns(address) +func (_Application *ApplicationCaller) GetConsensus(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Application.contract.Call(opts, &out, "getConsensus") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// GetConsensus is a free data retrieval call binding the contract method 0x179e740b. +// +// Solidity: function getConsensus() view returns(address) +func (_Application *ApplicationSession) GetConsensus() (common.Address, error) { + return _Application.Contract.GetConsensus(&_Application.CallOpts) +} + +// GetConsensus is a free data retrieval call binding the contract method 0x179e740b. +// +// Solidity: function getConsensus() view returns(address) +func (_Application *ApplicationCallerSession) GetConsensus() (common.Address, error) { + return _Application.Contract.GetConsensus(&_Application.CallOpts) +} + +// GetInputBox is a free data retrieval call binding the contract method 0x00aace9a. +// +// Solidity: function getInputBox() view returns(address) +func (_Application *ApplicationCaller) GetInputBox(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Application.contract.Call(opts, &out, "getInputBox") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// GetInputBox is a free data retrieval call binding the contract method 0x00aace9a. +// +// Solidity: function getInputBox() view returns(address) +func (_Application *ApplicationSession) GetInputBox() (common.Address, error) { + return _Application.Contract.GetInputBox(&_Application.CallOpts) +} + +// GetInputBox is a free data retrieval call binding the contract method 0x00aace9a. +// +// Solidity: function getInputBox() view returns(address) +func (_Application *ApplicationCallerSession) GetInputBox() (common.Address, error) { + return _Application.Contract.GetInputBox(&_Application.CallOpts) +} + +// GetPortals is a free data retrieval call binding the contract method 0x108e8c1d. +// +// Solidity: function getPortals() view returns(address[]) +func (_Application *ApplicationCaller) GetPortals(opts *bind.CallOpts) ([]common.Address, error) { + var out []interface{} + err := _Application.contract.Call(opts, &out, "getPortals") + + if err != nil { + return *new([]common.Address), err + } + + out0 := *abi.ConvertType(out[0], new([]common.Address)).(*[]common.Address) + + return out0, err + +} + +// GetPortals is a free data retrieval call binding the contract method 0x108e8c1d. +// +// Solidity: function getPortals() view returns(address[]) +func (_Application *ApplicationSession) GetPortals() ([]common.Address, error) { + return _Application.Contract.GetPortals(&_Application.CallOpts) +} + +// GetPortals is a free data retrieval call binding the contract method 0x108e8c1d. +// +// Solidity: function getPortals() view returns(address[]) +func (_Application *ApplicationCallerSession) GetPortals() ([]common.Address, error) { + return _Application.Contract.GetPortals(&_Application.CallOpts) +} + +// GetTemplateHash is a free data retrieval call binding the contract method 0x61b12c66. 
+// +// Solidity: function getTemplateHash() view returns(bytes32) +func (_Application *ApplicationCaller) GetTemplateHash(opts *bind.CallOpts) ([32]byte, error) { + var out []interface{} + err := _Application.contract.Call(opts, &out, "getTemplateHash") + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +// GetTemplateHash is a free data retrieval call binding the contract method 0x61b12c66. +// +// Solidity: function getTemplateHash() view returns(bytes32) +func (_Application *ApplicationSession) GetTemplateHash() ([32]byte, error) { + return _Application.Contract.GetTemplateHash(&_Application.CallOpts) +} + +// GetTemplateHash is a free data retrieval call binding the contract method 0x61b12c66. +// +// Solidity: function getTemplateHash() view returns(bytes32) +func (_Application *ApplicationCallerSession) GetTemplateHash() ([32]byte, error) { + return _Application.Contract.GetTemplateHash(&_Application.CallOpts) +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_Application *ApplicationCaller) Owner(opts *bind.CallOpts) (common.Address, error) { + var out []interface{} + err := _Application.contract.Call(opts, &out, "owner") + + if err != nil { + return *new(common.Address), err + } + + out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) + + return out0, err + +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_Application *ApplicationSession) Owner() (common.Address, error) { + return _Application.Contract.Owner(&_Application.CallOpts) +} + +// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. +// +// Solidity: function owner() view returns(address) +func (_Application *ApplicationCallerSession) Owner() (common.Address, error) { + return _Application.Contract.Owner(&_Application.CallOpts) +} + +// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. +// +// Solidity: function supportsInterface(bytes4 interfaceId) view returns(bool) +func (_Application *ApplicationCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { + var out []interface{} + err := _Application.contract.Call(opts, &out, "supportsInterface", interfaceId) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. +// +// Solidity: function supportsInterface(bytes4 interfaceId) view returns(bool) +func (_Application *ApplicationSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _Application.Contract.SupportsInterface(&_Application.CallOpts, interfaceId) +} + +// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. +// +// Solidity: function supportsInterface(bytes4 interfaceId) view returns(bool) +func (_Application *ApplicationCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { + return _Application.Contract.SupportsInterface(&_Application.CallOpts, interfaceId) +} + +// ValidateOutput is a free data retrieval call binding the contract method 0x4dcea155. 
+// +// Solidity: function validateOutput(bytes output, ((uint64,uint64),uint64,uint64,bytes32,bytes32,bytes32,bytes32[],bytes32[]) proof) view returns() +func (_Application *ApplicationCaller) ValidateOutput(opts *bind.CallOpts, output []byte, proof OutputValidityProof) error { + var out []interface{} + err := _Application.contract.Call(opts, &out, "validateOutput", output, proof) + + if err != nil { + return err + } + + return err + +} + +// ValidateOutput is a free data retrieval call binding the contract method 0x4dcea155. +// +// Solidity: function validateOutput(bytes output, ((uint64,uint64),uint64,uint64,bytes32,bytes32,bytes32,bytes32[],bytes32[]) proof) view returns() +func (_Application *ApplicationSession) ValidateOutput(output []byte, proof OutputValidityProof) error { + return _Application.Contract.ValidateOutput(&_Application.CallOpts, output, proof) +} + +// ValidateOutput is a free data retrieval call binding the contract method 0x4dcea155. +// +// Solidity: function validateOutput(bytes output, ((uint64,uint64),uint64,uint64,bytes32,bytes32,bytes32,bytes32[],bytes32[]) proof) view returns() +func (_Application *ApplicationCallerSession) ValidateOutput(output []byte, proof OutputValidityProof) error { + return _Application.Contract.ValidateOutput(&_Application.CallOpts, output, proof) +} + +// WasOutputExecuted is a free data retrieval call binding the contract method 0x24523192. +// +// Solidity: function wasOutputExecuted(uint256 inputIndex, uint256 outputIndexWithinInput) view returns(bool) +func (_Application *ApplicationCaller) WasOutputExecuted(opts *bind.CallOpts, inputIndex *big.Int, outputIndexWithinInput *big.Int) (bool, error) { + var out []interface{} + err := _Application.contract.Call(opts, &out, "wasOutputExecuted", inputIndex, outputIndexWithinInput) + + if err != nil { + return *new(bool), err + } + + out0 := *abi.ConvertType(out[0], new(bool)).(*bool) + + return out0, err + +} + +// WasOutputExecuted is a free data retrieval call binding the contract method 0x24523192. +// +// Solidity: function wasOutputExecuted(uint256 inputIndex, uint256 outputIndexWithinInput) view returns(bool) +func (_Application *ApplicationSession) WasOutputExecuted(inputIndex *big.Int, outputIndexWithinInput *big.Int) (bool, error) { + return _Application.Contract.WasOutputExecuted(&_Application.CallOpts, inputIndex, outputIndexWithinInput) +} + +// WasOutputExecuted is a free data retrieval call binding the contract method 0x24523192. +// +// Solidity: function wasOutputExecuted(uint256 inputIndex, uint256 outputIndexWithinInput) view returns(bool) +func (_Application *ApplicationCallerSession) WasOutputExecuted(inputIndex *big.Int, outputIndexWithinInput *big.Int) (bool, error) { + return _Application.Contract.WasOutputExecuted(&_Application.CallOpts, inputIndex, outputIndexWithinInput) +} + +// ExecuteOutput is a paid mutator transaction binding the contract method 0xdbe1a6eb. +// +// Solidity: function executeOutput(bytes output, ((uint64,uint64),uint64,uint64,bytes32,bytes32,bytes32,bytes32[],bytes32[]) proof) returns() +func (_Application *ApplicationTransactor) ExecuteOutput(opts *bind.TransactOpts, output []byte, proof OutputValidityProof) (*types.Transaction, error) { + return _Application.contract.Transact(opts, "executeOutput", output, proof) +} + +// ExecuteOutput is a paid mutator transaction binding the contract method 0xdbe1a6eb. 
+// +// Solidity: function executeOutput(bytes output, ((uint64,uint64),uint64,uint64,bytes32,bytes32,bytes32,bytes32[],bytes32[]) proof) returns() +func (_Application *ApplicationSession) ExecuteOutput(output []byte, proof OutputValidityProof) (*types.Transaction, error) { + return _Application.Contract.ExecuteOutput(&_Application.TransactOpts, output, proof) +} + +// ExecuteOutput is a paid mutator transaction binding the contract method 0xdbe1a6eb. +// +// Solidity: function executeOutput(bytes output, ((uint64,uint64),uint64,uint64,bytes32,bytes32,bytes32,bytes32[],bytes32[]) proof) returns() +func (_Application *ApplicationTransactorSession) ExecuteOutput(output []byte, proof OutputValidityProof) (*types.Transaction, error) { + return _Application.Contract.ExecuteOutput(&_Application.TransactOpts, output, proof) +} + +// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. +// +// Solidity: function migrateToConsensus(address newConsensus) returns() +func (_Application *ApplicationTransactor) MigrateToConsensus(opts *bind.TransactOpts, newConsensus common.Address) (*types.Transaction, error) { + return _Application.contract.Transact(opts, "migrateToConsensus", newConsensus) +} + +// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. +// +// Solidity: function migrateToConsensus(address newConsensus) returns() +func (_Application *ApplicationSession) MigrateToConsensus(newConsensus common.Address) (*types.Transaction, error) { + return _Application.Contract.MigrateToConsensus(&_Application.TransactOpts, newConsensus) +} + +// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. +// +// Solidity: function migrateToConsensus(address newConsensus) returns() +func (_Application *ApplicationTransactorSession) MigrateToConsensus(newConsensus common.Address) (*types.Transaction, error) { + return _Application.Contract.MigrateToConsensus(&_Application.TransactOpts, newConsensus) +} + +// OnERC1155BatchReceived is a paid mutator transaction binding the contract method 0xbc197c81. +// +// Solidity: function onERC1155BatchReceived(address , address , uint256[] , uint256[] , bytes ) returns(bytes4) +func (_Application *ApplicationTransactor) OnERC1155BatchReceived(opts *bind.TransactOpts, arg0 common.Address, arg1 common.Address, arg2 []*big.Int, arg3 []*big.Int, arg4 []byte) (*types.Transaction, error) { + return _Application.contract.Transact(opts, "onERC1155BatchReceived", arg0, arg1, arg2, arg3, arg4) +} + +// OnERC1155BatchReceived is a paid mutator transaction binding the contract method 0xbc197c81. +// +// Solidity: function onERC1155BatchReceived(address , address , uint256[] , uint256[] , bytes ) returns(bytes4) +func (_Application *ApplicationSession) OnERC1155BatchReceived(arg0 common.Address, arg1 common.Address, arg2 []*big.Int, arg3 []*big.Int, arg4 []byte) (*types.Transaction, error) { + return _Application.Contract.OnERC1155BatchReceived(&_Application.TransactOpts, arg0, arg1, arg2, arg3, arg4) +} + +// OnERC1155BatchReceived is a paid mutator transaction binding the contract method 0xbc197c81. 
+// +// Solidity: function onERC1155BatchReceived(address , address , uint256[] , uint256[] , bytes ) returns(bytes4) +func (_Application *ApplicationTransactorSession) OnERC1155BatchReceived(arg0 common.Address, arg1 common.Address, arg2 []*big.Int, arg3 []*big.Int, arg4 []byte) (*types.Transaction, error) { + return _Application.Contract.OnERC1155BatchReceived(&_Application.TransactOpts, arg0, arg1, arg2, arg3, arg4) +} + +// OnERC1155Received is a paid mutator transaction binding the contract method 0xf23a6e61. +// +// Solidity: function onERC1155Received(address , address , uint256 , uint256 , bytes ) returns(bytes4) +func (_Application *ApplicationTransactor) OnERC1155Received(opts *bind.TransactOpts, arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 *big.Int, arg4 []byte) (*types.Transaction, error) { + return _Application.contract.Transact(opts, "onERC1155Received", arg0, arg1, arg2, arg3, arg4) +} + +// OnERC1155Received is a paid mutator transaction binding the contract method 0xf23a6e61. +// +// Solidity: function onERC1155Received(address , address , uint256 , uint256 , bytes ) returns(bytes4) +func (_Application *ApplicationSession) OnERC1155Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 *big.Int, arg4 []byte) (*types.Transaction, error) { + return _Application.Contract.OnERC1155Received(&_Application.TransactOpts, arg0, arg1, arg2, arg3, arg4) +} + +// OnERC1155Received is a paid mutator transaction binding the contract method 0xf23a6e61. +// +// Solidity: function onERC1155Received(address , address , uint256 , uint256 , bytes ) returns(bytes4) +func (_Application *ApplicationTransactorSession) OnERC1155Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 *big.Int, arg4 []byte) (*types.Transaction, error) { + return _Application.Contract.OnERC1155Received(&_Application.TransactOpts, arg0, arg1, arg2, arg3, arg4) +} + +// OnERC721Received is a paid mutator transaction binding the contract method 0x150b7a02. +// +// Solidity: function onERC721Received(address , address , uint256 , bytes ) returns(bytes4) +func (_Application *ApplicationTransactor) OnERC721Received(opts *bind.TransactOpts, arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 []byte) (*types.Transaction, error) { + return _Application.contract.Transact(opts, "onERC721Received", arg0, arg1, arg2, arg3) +} + +// OnERC721Received is a paid mutator transaction binding the contract method 0x150b7a02. +// +// Solidity: function onERC721Received(address , address , uint256 , bytes ) returns(bytes4) +func (_Application *ApplicationSession) OnERC721Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 []byte) (*types.Transaction, error) { + return _Application.Contract.OnERC721Received(&_Application.TransactOpts, arg0, arg1, arg2, arg3) +} + +// OnERC721Received is a paid mutator transaction binding the contract method 0x150b7a02. +// +// Solidity: function onERC721Received(address , address , uint256 , bytes ) returns(bytes4) +func (_Application *ApplicationTransactorSession) OnERC721Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 []byte) (*types.Transaction, error) { + return _Application.Contract.OnERC721Received(&_Application.TransactOpts, arg0, arg1, arg2, arg3) +} + +// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. 
+// +// Solidity: function renounceOwnership() returns() +func (_Application *ApplicationTransactor) RenounceOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Application.contract.Transact(opts, "renounceOwnership") +} + +// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. +// +// Solidity: function renounceOwnership() returns() +func (_Application *ApplicationSession) RenounceOwnership() (*types.Transaction, error) { + return _Application.Contract.RenounceOwnership(&_Application.TransactOpts) +} + +// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. +// +// Solidity: function renounceOwnership() returns() +func (_Application *ApplicationTransactorSession) RenounceOwnership() (*types.Transaction, error) { + return _Application.Contract.RenounceOwnership(&_Application.TransactOpts) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. +// +// Solidity: function transferOwnership(address newOwner) returns() +func (_Application *ApplicationTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { + return _Application.contract.Transact(opts, "transferOwnership", newOwner) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. +// +// Solidity: function transferOwnership(address newOwner) returns() +func (_Application *ApplicationSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _Application.Contract.TransferOwnership(&_Application.TransactOpts, newOwner) +} + +// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. +// +// Solidity: function transferOwnership(address newOwner) returns() +func (_Application *ApplicationTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { + return _Application.Contract.TransferOwnership(&_Application.TransactOpts, newOwner) +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_Application *ApplicationTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Application.contract.RawTransact(opts, nil) // calldata is disallowed for receive function +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_Application *ApplicationSession) Receive() (*types.Transaction, error) { + return _Application.Contract.Receive(&_Application.TransactOpts) +} + +// Receive is a paid mutator transaction binding the contract receive function. +// +// Solidity: receive() payable returns() +func (_Application *ApplicationTransactorSession) Receive() (*types.Transaction, error) { + return _Application.Contract.Receive(&_Application.TransactOpts) +} + +// ApplicationNewConsensusIterator is returned from FilterNewConsensus and is used to iterate over the raw logs and unpacked data for NewConsensus events raised by the Application contract. 
+type ApplicationNewConsensusIterator struct { + Event *ApplicationNewConsensus // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *ApplicationNewConsensusIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(ApplicationNewConsensus) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(ApplicationNewConsensus) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *ApplicationNewConsensusIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *ApplicationNewConsensusIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// ApplicationNewConsensus represents a NewConsensus event raised by the Application contract. +type ApplicationNewConsensus struct { + NewConsensus common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterNewConsensus is a free log retrieval operation binding the contract event 0x4991c6f37185659e276ff918a96f3e20e6c5abcd8c9aab450dc19c2f7ad35cb5. +// +// Solidity: event NewConsensus(address newConsensus) +func (_Application *ApplicationFilterer) FilterNewConsensus(opts *bind.FilterOpts) (*ApplicationNewConsensusIterator, error) { + + logs, sub, err := _Application.contract.FilterLogs(opts, "NewConsensus") + if err != nil { + return nil, err + } + return &ApplicationNewConsensusIterator{contract: _Application.contract, event: "NewConsensus", logs: logs, sub: sub}, nil +} + +// WatchNewConsensus is a free log subscription operation binding the contract event 0x4991c6f37185659e276ff918a96f3e20e6c5abcd8c9aab450dc19c2f7ad35cb5. 
+// +// Solidity: event NewConsensus(address newConsensus) +func (_Application *ApplicationFilterer) WatchNewConsensus(opts *bind.WatchOpts, sink chan<- *ApplicationNewConsensus) (event.Subscription, error) { + + logs, sub, err := _Application.contract.WatchLogs(opts, "NewConsensus") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(ApplicationNewConsensus) + if err := _Application.contract.UnpackLog(event, "NewConsensus", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseNewConsensus is a log parse operation binding the contract event 0x4991c6f37185659e276ff918a96f3e20e6c5abcd8c9aab450dc19c2f7ad35cb5. +// +// Solidity: event NewConsensus(address newConsensus) +func (_Application *ApplicationFilterer) ParseNewConsensus(log types.Log) (*ApplicationNewConsensus, error) { + event := new(ApplicationNewConsensus) + if err := _Application.contract.UnpackLog(event, "NewConsensus", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// ApplicationOutputExecutedIterator is returned from FilterOutputExecuted and is used to iterate over the raw logs and unpacked data for OutputExecuted events raised by the Application contract. +type ApplicationOutputExecutedIterator struct { + Event *ApplicationOutputExecuted // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *ApplicationOutputExecutedIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(ApplicationOutputExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(ApplicationOutputExecuted) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *ApplicationOutputExecutedIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. 
+func (it *ApplicationOutputExecutedIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// ApplicationOutputExecuted represents a OutputExecuted event raised by the Application contract. +type ApplicationOutputExecuted struct { + InputIndex uint64 + OutputIndexWithinInput uint64 + Output []byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterOutputExecuted is a free log retrieval operation binding the contract event 0xd39d8e3e610251d36b5464d9cabbd8fa8319fe6cff76941ce041ecf04669726f. +// +// Solidity: event OutputExecuted(uint64 inputIndex, uint64 outputIndexWithinInput, bytes output) +func (_Application *ApplicationFilterer) FilterOutputExecuted(opts *bind.FilterOpts) (*ApplicationOutputExecutedIterator, error) { + + logs, sub, err := _Application.contract.FilterLogs(opts, "OutputExecuted") + if err != nil { + return nil, err + } + return &ApplicationOutputExecutedIterator{contract: _Application.contract, event: "OutputExecuted", logs: logs, sub: sub}, nil +} + +// WatchOutputExecuted is a free log subscription operation binding the contract event 0xd39d8e3e610251d36b5464d9cabbd8fa8319fe6cff76941ce041ecf04669726f. +// +// Solidity: event OutputExecuted(uint64 inputIndex, uint64 outputIndexWithinInput, bytes output) +func (_Application *ApplicationFilterer) WatchOutputExecuted(opts *bind.WatchOpts, sink chan<- *ApplicationOutputExecuted) (event.Subscription, error) { + + logs, sub, err := _Application.contract.WatchLogs(opts, "OutputExecuted") + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(ApplicationOutputExecuted) + if err := _Application.contract.UnpackLog(event, "OutputExecuted", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseOutputExecuted is a log parse operation binding the contract event 0xd39d8e3e610251d36b5464d9cabbd8fa8319fe6cff76941ce041ecf04669726f. +// +// Solidity: event OutputExecuted(uint64 inputIndex, uint64 outputIndexWithinInput, bytes output) +func (_Application *ApplicationFilterer) ParseOutputExecuted(log types.Log) (*ApplicationOutputExecuted, error) { + event := new(ApplicationOutputExecuted) + if err := _Application.contract.UnpackLog(event, "OutputExecuted", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// ApplicationOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the Application contract. 
+type ApplicationOwnershipTransferredIterator struct { + Event *ApplicationOwnershipTransferred // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. +func (it *ApplicationOwnershipTransferredIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(ApplicationOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(ApplicationOwnershipTransferred) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *ApplicationOwnershipTransferredIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *ApplicationOwnershipTransferredIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// ApplicationOwnershipTransferred represents a OwnershipTransferred event raised by the Application contract. +type ApplicationOwnershipTransferred struct { + PreviousOwner common.Address + NewOwner common.Address + Raw types.Log // Blockchain specific contextual infos +} + +// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
+// +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_Application *ApplicationFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*ApplicationOwnershipTransferredIterator, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _Application.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return &ApplicationOwnershipTransferredIterator{contract: _Application.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil +} + +// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. +// +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_Application *ApplicationFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *ApplicationOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { + + var previousOwnerRule []interface{} + for _, previousOwnerItem := range previousOwner { + previousOwnerRule = append(previousOwnerRule, previousOwnerItem) + } + var newOwnerRule []interface{} + for _, newOwnerItem := range newOwner { + newOwnerRule = append(newOwnerRule, newOwnerItem) + } + + logs, sub, err := _Application.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(ApplicationOwnershipTransferred) + if err := _Application.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. +// +// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) +func (_Application *ApplicationFilterer) ParseOwnershipTransferred(log types.Log) (*ApplicationOwnershipTransferred, error) { + event := new(ApplicationOwnershipTransferred) + if err := _Application.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/pkg/contracts/authority.go b/pkg/contracts/authority.go deleted file mode 100644 index e584ecce6..000000000 --- a/pkg/contracts/authority.go +++ /dev/null @@ -1,844 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. 
- -package contracts - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription - _ = abi.ConvertType -) - -// AuthorityMetaData contains all meta data concerning the Authority contract. -var AuthorityMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"AuthorityWithdrawalFailed\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"application\",\"type\":\"address\"}],\"name\":\"ApplicationJoined\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractIHistory\",\"name\":\"history\",\"type\":\"address\"}],\"name\":\"NewHistory\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_dapp\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_proofContext\",\"type\":\"bytes\"}],\"name\":\"getClaim\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getHistory\",\"outputs\":[{\"internalType\":\"contractIHistory\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"join\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_consensus\",\"type\":\"address\"}],\"name\":\"migrateHistoryToConsensus\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIHistory\",\"name\":\"_history\",\"type\":\"address\"}],\"name\":\"setHistory\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_claimData\",\"type\":\"bytes\"}],\"name\":\"submitClaim\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIERC20\",\"name\":\"_token\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_recipient\",\"typ
e\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_amount\",\"type\":\"uint256\"}],\"name\":\"withdrawERC20Tokens\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", -} - -// AuthorityABI is the input ABI used to generate the binding from. -// Deprecated: Use AuthorityMetaData.ABI instead. -var AuthorityABI = AuthorityMetaData.ABI - -// Authority is an auto generated Go binding around an Ethereum contract. -type Authority struct { - AuthorityCaller // Read-only binding to the contract - AuthorityTransactor // Write-only binding to the contract - AuthorityFilterer // Log filterer for contract events -} - -// AuthorityCaller is an auto generated read-only Go binding around an Ethereum contract. -type AuthorityCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// AuthorityTransactor is an auto generated write-only Go binding around an Ethereum contract. -type AuthorityTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// AuthorityFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type AuthorityFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// AuthoritySession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type AuthoritySession struct { - Contract *Authority // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// AuthorityCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type AuthorityCallerSession struct { - Contract *AuthorityCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// AuthorityTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type AuthorityTransactorSession struct { - Contract *AuthorityTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// AuthorityRaw is an auto generated low-level Go binding around an Ethereum contract. -type AuthorityRaw struct { - Contract *Authority // Generic contract binding to access the raw methods on -} - -// AuthorityCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type AuthorityCallerRaw struct { - Contract *AuthorityCaller // Generic read-only contract binding to access the raw methods on -} - -// AuthorityTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type AuthorityTransactorRaw struct { - Contract *AuthorityTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewAuthority creates a new instance of Authority, bound to a specific deployed contract. 
-func NewAuthority(address common.Address, backend bind.ContractBackend) (*Authority, error) { - contract, err := bindAuthority(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &Authority{AuthorityCaller: AuthorityCaller{contract: contract}, AuthorityTransactor: AuthorityTransactor{contract: contract}, AuthorityFilterer: AuthorityFilterer{contract: contract}}, nil -} - -// NewAuthorityCaller creates a new read-only instance of Authority, bound to a specific deployed contract. -func NewAuthorityCaller(address common.Address, caller bind.ContractCaller) (*AuthorityCaller, error) { - contract, err := bindAuthority(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &AuthorityCaller{contract: contract}, nil -} - -// NewAuthorityTransactor creates a new write-only instance of Authority, bound to a specific deployed contract. -func NewAuthorityTransactor(address common.Address, transactor bind.ContractTransactor) (*AuthorityTransactor, error) { - contract, err := bindAuthority(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &AuthorityTransactor{contract: contract}, nil -} - -// NewAuthorityFilterer creates a new log filterer instance of Authority, bound to a specific deployed contract. -func NewAuthorityFilterer(address common.Address, filterer bind.ContractFilterer) (*AuthorityFilterer, error) { - contract, err := bindAuthority(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &AuthorityFilterer{contract: contract}, nil -} - -// bindAuthority binds a generic wrapper to an already deployed contract. -func bindAuthority(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := AuthorityMetaData.GetAbi() - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Authority *AuthorityRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Authority.Contract.AuthorityCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Authority *AuthorityRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Authority.Contract.AuthorityTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Authority *AuthorityRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _Authority.Contract.AuthorityTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_Authority *AuthorityCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _Authority.Contract.contract.Call(opts, result, method, params...) 
-} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_Authority *AuthorityTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Authority.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_Authority *AuthorityTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _Authority.Contract.contract.Transact(opts, method, params...) -} - -// GetClaim is a free data retrieval call binding the contract method 0xd79a8240. -// -// Solidity: function getClaim(address _dapp, bytes _proofContext) view returns(bytes32, uint256, uint256) -func (_Authority *AuthorityCaller) GetClaim(opts *bind.CallOpts, _dapp common.Address, _proofContext []byte) ([32]byte, *big.Int, *big.Int, error) { - var out []interface{} - err := _Authority.contract.Call(opts, &out, "getClaim", _dapp, _proofContext) - - if err != nil { - return *new([32]byte), *new(*big.Int), *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) - out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) - out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) - - return out0, out1, out2, err - -} - -// GetClaim is a free data retrieval call binding the contract method 0xd79a8240. -// -// Solidity: function getClaim(address _dapp, bytes _proofContext) view returns(bytes32, uint256, uint256) -func (_Authority *AuthoritySession) GetClaim(_dapp common.Address, _proofContext []byte) ([32]byte, *big.Int, *big.Int, error) { - return _Authority.Contract.GetClaim(&_Authority.CallOpts, _dapp, _proofContext) -} - -// GetClaim is a free data retrieval call binding the contract method 0xd79a8240. -// -// Solidity: function getClaim(address _dapp, bytes _proofContext) view returns(bytes32, uint256, uint256) -func (_Authority *AuthorityCallerSession) GetClaim(_dapp common.Address, _proofContext []byte) ([32]byte, *big.Int, *big.Int, error) { - return _Authority.Contract.GetClaim(&_Authority.CallOpts, _dapp, _proofContext) -} - -// GetHistory is a free data retrieval call binding the contract method 0xaa15efc8. -// -// Solidity: function getHistory() view returns(address) -func (_Authority *AuthorityCaller) GetHistory(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _Authority.contract.Call(opts, &out, "getHistory") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// GetHistory is a free data retrieval call binding the contract method 0xaa15efc8. -// -// Solidity: function getHistory() view returns(address) -func (_Authority *AuthoritySession) GetHistory() (common.Address, error) { - return _Authority.Contract.GetHistory(&_Authority.CallOpts) -} - -// GetHistory is a free data retrieval call binding the contract method 0xaa15efc8. -// -// Solidity: function getHistory() view returns(address) -func (_Authority *AuthorityCallerSession) GetHistory() (common.Address, error) { - return _Authority.Contract.GetHistory(&_Authority.CallOpts) -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. 
-// -// Solidity: function owner() view returns(address) -func (_Authority *AuthorityCaller) Owner(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _Authority.contract.Call(opts, &out, "owner") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. -// -// Solidity: function owner() view returns(address) -func (_Authority *AuthoritySession) Owner() (common.Address, error) { - return _Authority.Contract.Owner(&_Authority.CallOpts) -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. -// -// Solidity: function owner() view returns(address) -func (_Authority *AuthorityCallerSession) Owner() (common.Address, error) { - return _Authority.Contract.Owner(&_Authority.CallOpts) -} - -// Join is a paid mutator transaction binding the contract method 0xb688a363. -// -// Solidity: function join() returns() -func (_Authority *AuthorityTransactor) Join(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Authority.contract.Transact(opts, "join") -} - -// Join is a paid mutator transaction binding the contract method 0xb688a363. -// -// Solidity: function join() returns() -func (_Authority *AuthoritySession) Join() (*types.Transaction, error) { - return _Authority.Contract.Join(&_Authority.TransactOpts) -} - -// Join is a paid mutator transaction binding the contract method 0xb688a363. -// -// Solidity: function join() returns() -func (_Authority *AuthorityTransactorSession) Join() (*types.Transaction, error) { - return _Authority.Contract.Join(&_Authority.TransactOpts) -} - -// MigrateHistoryToConsensus is a paid mutator transaction binding the contract method 0x9368a3d3. -// -// Solidity: function migrateHistoryToConsensus(address _consensus) returns() -func (_Authority *AuthorityTransactor) MigrateHistoryToConsensus(opts *bind.TransactOpts, _consensus common.Address) (*types.Transaction, error) { - return _Authority.contract.Transact(opts, "migrateHistoryToConsensus", _consensus) -} - -// MigrateHistoryToConsensus is a paid mutator transaction binding the contract method 0x9368a3d3. -// -// Solidity: function migrateHistoryToConsensus(address _consensus) returns() -func (_Authority *AuthoritySession) MigrateHistoryToConsensus(_consensus common.Address) (*types.Transaction, error) { - return _Authority.Contract.MigrateHistoryToConsensus(&_Authority.TransactOpts, _consensus) -} - -// MigrateHistoryToConsensus is a paid mutator transaction binding the contract method 0x9368a3d3. -// -// Solidity: function migrateHistoryToConsensus(address _consensus) returns() -func (_Authority *AuthorityTransactorSession) MigrateHistoryToConsensus(_consensus common.Address) (*types.Transaction, error) { - return _Authority.Contract.MigrateHistoryToConsensus(&_Authority.TransactOpts, _consensus) -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. -// -// Solidity: function renounceOwnership() returns() -func (_Authority *AuthorityTransactor) RenounceOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { - return _Authority.contract.Transact(opts, "renounceOwnership") -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. 
-// -// Solidity: function renounceOwnership() returns() -func (_Authority *AuthoritySession) RenounceOwnership() (*types.Transaction, error) { - return _Authority.Contract.RenounceOwnership(&_Authority.TransactOpts) -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. -// -// Solidity: function renounceOwnership() returns() -func (_Authority *AuthorityTransactorSession) RenounceOwnership() (*types.Transaction, error) { - return _Authority.Contract.RenounceOwnership(&_Authority.TransactOpts) -} - -// SetHistory is a paid mutator transaction binding the contract method 0x159c5ea1. -// -// Solidity: function setHistory(address _history) returns() -func (_Authority *AuthorityTransactor) SetHistory(opts *bind.TransactOpts, _history common.Address) (*types.Transaction, error) { - return _Authority.contract.Transact(opts, "setHistory", _history) -} - -// SetHistory is a paid mutator transaction binding the contract method 0x159c5ea1. -// -// Solidity: function setHistory(address _history) returns() -func (_Authority *AuthoritySession) SetHistory(_history common.Address) (*types.Transaction, error) { - return _Authority.Contract.SetHistory(&_Authority.TransactOpts, _history) -} - -// SetHistory is a paid mutator transaction binding the contract method 0x159c5ea1. -// -// Solidity: function setHistory(address _history) returns() -func (_Authority *AuthorityTransactorSession) SetHistory(_history common.Address) (*types.Transaction, error) { - return _Authority.Contract.SetHistory(&_Authority.TransactOpts, _history) -} - -// SubmitClaim is a paid mutator transaction binding the contract method 0xddfdfbb0. -// -// Solidity: function submitClaim(bytes _claimData) returns() -func (_Authority *AuthorityTransactor) SubmitClaim(opts *bind.TransactOpts, _claimData []byte) (*types.Transaction, error) { - return _Authority.contract.Transact(opts, "submitClaim", _claimData) -} - -// SubmitClaim is a paid mutator transaction binding the contract method 0xddfdfbb0. -// -// Solidity: function submitClaim(bytes _claimData) returns() -func (_Authority *AuthoritySession) SubmitClaim(_claimData []byte) (*types.Transaction, error) { - return _Authority.Contract.SubmitClaim(&_Authority.TransactOpts, _claimData) -} - -// SubmitClaim is a paid mutator transaction binding the contract method 0xddfdfbb0. -// -// Solidity: function submitClaim(bytes _claimData) returns() -func (_Authority *AuthorityTransactorSession) SubmitClaim(_claimData []byte) (*types.Transaction, error) { - return _Authority.Contract.SubmitClaim(&_Authority.TransactOpts, _claimData) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_Authority *AuthorityTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { - return _Authority.contract.Transact(opts, "transferOwnership", newOwner) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_Authority *AuthoritySession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { - return _Authority.Contract.TransferOwnership(&_Authority.TransactOpts, newOwner) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. 
-// -// Solidity: function transferOwnership(address newOwner) returns() -func (_Authority *AuthorityTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { - return _Authority.Contract.TransferOwnership(&_Authority.TransactOpts, newOwner) -} - -// WithdrawERC20Tokens is a paid mutator transaction binding the contract method 0xbcdd1e13. -// -// Solidity: function withdrawERC20Tokens(address _token, address _recipient, uint256 _amount) returns() -func (_Authority *AuthorityTransactor) WithdrawERC20Tokens(opts *bind.TransactOpts, _token common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { - return _Authority.contract.Transact(opts, "withdrawERC20Tokens", _token, _recipient, _amount) -} - -// WithdrawERC20Tokens is a paid mutator transaction binding the contract method 0xbcdd1e13. -// -// Solidity: function withdrawERC20Tokens(address _token, address _recipient, uint256 _amount) returns() -func (_Authority *AuthoritySession) WithdrawERC20Tokens(_token common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { - return _Authority.Contract.WithdrawERC20Tokens(&_Authority.TransactOpts, _token, _recipient, _amount) -} - -// WithdrawERC20Tokens is a paid mutator transaction binding the contract method 0xbcdd1e13. -// -// Solidity: function withdrawERC20Tokens(address _token, address _recipient, uint256 _amount) returns() -func (_Authority *AuthorityTransactorSession) WithdrawERC20Tokens(_token common.Address, _recipient common.Address, _amount *big.Int) (*types.Transaction, error) { - return _Authority.Contract.WithdrawERC20Tokens(&_Authority.TransactOpts, _token, _recipient, _amount) -} - -// AuthorityApplicationJoinedIterator is returned from FilterApplicationJoined and is used to iterate over the raw logs and unpacked data for ApplicationJoined events raised by the Authority contract. -type AuthorityApplicationJoinedIterator struct { - Event *AuthorityApplicationJoined // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *AuthorityApplicationJoinedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(AuthorityApplicationJoined) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(AuthorityApplicationJoined) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *AuthorityApplicationJoinedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *AuthorityApplicationJoinedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// AuthorityApplicationJoined represents a ApplicationJoined event raised by the Authority contract. -type AuthorityApplicationJoined struct { - Application common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterApplicationJoined is a free log retrieval operation binding the contract event 0x27c2b702d3bff195a18baca2daf00b20a986177c5f1449af4e2d46a3c3e02ce5. -// -// Solidity: event ApplicationJoined(address application) -func (_Authority *AuthorityFilterer) FilterApplicationJoined(opts *bind.FilterOpts) (*AuthorityApplicationJoinedIterator, error) { - - logs, sub, err := _Authority.contract.FilterLogs(opts, "ApplicationJoined") - if err != nil { - return nil, err - } - return &AuthorityApplicationJoinedIterator{contract: _Authority.contract, event: "ApplicationJoined", logs: logs, sub: sub}, nil -} - -// WatchApplicationJoined is a free log subscription operation binding the contract event 0x27c2b702d3bff195a18baca2daf00b20a986177c5f1449af4e2d46a3c3e02ce5. -// -// Solidity: event ApplicationJoined(address application) -func (_Authority *AuthorityFilterer) WatchApplicationJoined(opts *bind.WatchOpts, sink chan<- *AuthorityApplicationJoined) (event.Subscription, error) { - - logs, sub, err := _Authority.contract.WatchLogs(opts, "ApplicationJoined") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(AuthorityApplicationJoined) - if err := _Authority.contract.UnpackLog(event, "ApplicationJoined", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseApplicationJoined is a log parse operation binding the contract event 0x27c2b702d3bff195a18baca2daf00b20a986177c5f1449af4e2d46a3c3e02ce5. 
-// -// Solidity: event ApplicationJoined(address application) -func (_Authority *AuthorityFilterer) ParseApplicationJoined(log types.Log) (*AuthorityApplicationJoined, error) { - event := new(AuthorityApplicationJoined) - if err := _Authority.contract.UnpackLog(event, "ApplicationJoined", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// AuthorityNewHistoryIterator is returned from FilterNewHistory and is used to iterate over the raw logs and unpacked data for NewHistory events raised by the Authority contract. -type AuthorityNewHistoryIterator struct { - Event *AuthorityNewHistory // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *AuthorityNewHistoryIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(AuthorityNewHistory) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(AuthorityNewHistory) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *AuthorityNewHistoryIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *AuthorityNewHistoryIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// AuthorityNewHistory represents a NewHistory event raised by the Authority contract. -type AuthorityNewHistory struct { - History common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterNewHistory is a free log retrieval operation binding the contract event 0x2bcd43869347a1d42f97ac6042f3d129817abd05a6125f9750fe3724e321d23e. -// -// Solidity: event NewHistory(address history) -func (_Authority *AuthorityFilterer) FilterNewHistory(opts *bind.FilterOpts) (*AuthorityNewHistoryIterator, error) { - - logs, sub, err := _Authority.contract.FilterLogs(opts, "NewHistory") - if err != nil { - return nil, err - } - return &AuthorityNewHistoryIterator{contract: _Authority.contract, event: "NewHistory", logs: logs, sub: sub}, nil -} - -// WatchNewHistory is a free log subscription operation binding the contract event 0x2bcd43869347a1d42f97ac6042f3d129817abd05a6125f9750fe3724e321d23e. 
-// -// Solidity: event NewHistory(address history) -func (_Authority *AuthorityFilterer) WatchNewHistory(opts *bind.WatchOpts, sink chan<- *AuthorityNewHistory) (event.Subscription, error) { - - logs, sub, err := _Authority.contract.WatchLogs(opts, "NewHistory") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(AuthorityNewHistory) - if err := _Authority.contract.UnpackLog(event, "NewHistory", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseNewHistory is a log parse operation binding the contract event 0x2bcd43869347a1d42f97ac6042f3d129817abd05a6125f9750fe3724e321d23e. -// -// Solidity: event NewHistory(address history) -func (_Authority *AuthorityFilterer) ParseNewHistory(log types.Log) (*AuthorityNewHistory, error) { - event := new(AuthorityNewHistory) - if err := _Authority.contract.UnpackLog(event, "NewHistory", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// AuthorityOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the Authority contract. -type AuthorityOwnershipTransferredIterator struct { - Event *AuthorityOwnershipTransferred // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *AuthorityOwnershipTransferredIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(AuthorityOwnershipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(AuthorityOwnershipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *AuthorityOwnershipTransferredIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. 
-func (it *AuthorityOwnershipTransferredIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// AuthorityOwnershipTransferred represents a OwnershipTransferred event raised by the Authority contract. -type AuthorityOwnershipTransferred struct { - PreviousOwner common.Address - NewOwner common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. -// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_Authority *AuthorityFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*AuthorityOwnershipTransferredIterator, error) { - - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - - logs, sub, err := _Authority.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) - if err != nil { - return nil, err - } - return &AuthorityOwnershipTransferredIterator{contract: _Authority.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil -} - -// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. -// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_Authority *AuthorityFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *AuthorityOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { - - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - - logs, sub, err := _Authority.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(AuthorityOwnershipTransferred) - if err := _Authority.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
-// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_Authority *AuthorityFilterer) ParseOwnershipTransferred(log types.Log) (*AuthorityOwnershipTransferred, error) { - event := new(AuthorityOwnershipTransferred) - if err := _Authority.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/pkg/contracts/cartesi_dapp.go b/pkg/contracts/cartesi_dapp.go deleted file mode 100644 index cb9c0cfe6..000000000 --- a/pkg/contracts/cartesi_dapp.go +++ /dev/null @@ -1,995 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription - _ = abi.ConvertType -) - -// OutputValidityProof is an auto generated low-level Go binding around an user-defined struct. -type OutputValidityProof struct { - InputIndexWithinEpoch uint64 - OutputIndexWithinInput uint64 - OutputHashesRootHash [32]byte - VouchersEpochRootHash [32]byte - NoticesEpochRootHash [32]byte - MachineStateHash [32]byte - OutputHashInOutputHashesSiblings [][32]byte - OutputHashesInEpochSiblings [][32]byte -} - -// Proof is an auto generated low-level Go binding around an user-defined struct. -type Proof struct { - Validity OutputValidityProof - Context []byte -} - -// CartesiDAppMetaData contains all meta data concerning the CartesiDApp contract. 
-var CartesiDAppMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"_consensus\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_templateHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"EtherTransferFailed\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectEpochHash\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectOutputHashesRootHash\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"IncorrectOutputsEpochRootHash\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InputIndexOutOfClaimBounds\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"OnlyDApp\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"VoucherReexecutionNotAllowed\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"contractIConsensus\",\"name\":\"newConsensus\",\"type\":\"address\"}],\"name\":\"NewConsensus\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"voucherId\",\"type\":\"uint256\"}],\"name\":\"VoucherExecuted\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_destination\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_payload\",\"type\":\"bytes\"},{\"components\":[{\"components\":[{\"internalType\":\"uint64\",\"name\":\"inputIndexWithinEpoch\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"outputIndexWithinInput\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"outputHashesRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"vouchersEpochRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"noticesEpochRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"machineStateHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashInOutputHashesSiblings\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashesInEpochSiblings\",\"type\":\"bytes32[]\"}],\"internalType\":\"structOutputValidityProof\",\"name\":\"validity\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"context\",\"type\":\"bytes\"}],\"internalType\":\"structProof\",\"name\":\"_proof\",\"type\":\"tuple\"}],\"name\":\"executeVoucher\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getConsensus\",\"outputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"getTemplateHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"_newConsensus\",\"type\":\"address\"}],\"name\":\"migrateToConsensus\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256[]\",\"name
\":\"\",\"type\":\"uint256[]\"},{\"internalType\":\"uint256[]\",\"name\":\"\",\"type\":\"uint256[]\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"onERC1155BatchReceived\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"bytes4\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"onERC1155Received\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"bytes4\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"\",\"type\":\"bytes\"}],\"name\":\"onERC721Received\",\"outputs\":[{\"internalType\":\"bytes4\",\"name\":\"\",\"type\":\"bytes4\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes4\",\"name\":\"interfaceId\",\"type\":\"bytes4\"}],\"name\":\"supportsInterface\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_notice\",\"type\":\"bytes\"},{\"components\":[{\"components\":[{\"internalType\":\"uint64\",\"name\":\"inputIndexWithinEpoch\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"outputIndexWithinInput\",\"type\":\"uint64\"},{\"internalType\":\"bytes32\",\"name\":\"outputHashesRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"vouchersEpochRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"noticesEpochRootHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"machineStateHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashInOutputHashesSiblings\",\"type\":\"bytes32[]\"},{\"internalType\":\"bytes32[]\",\"name\":\"outputHashesInEpochSiblings\",\"type\":\"bytes32[]\"}],\"internalType\":\"structOutputValidityProof\",\"name\":\"validity\",\"type\":\"tuple\"},{\"internalType\":\"bytes\",\"name\":\"context\",\"type\":\"bytes\"}],\"internalType\":\"structProof\",\"name\":\"_proof\",\"type\":\"tuple\"}],\"name\":\"validateNotice\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_inputIndex\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_outputIndexWithinInput\",\"type\":\"uint256\"}],\"name\":\"wasVoucherExecuted\",\"outputs\":[{\"internalType\":\"bool\",\"name\":\"\",\"type\":\"bool\"}],\"stateMutability\":\"view\",\"type\":\"functio
n\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_receiver\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_value\",\"type\":\"uint256\"}],\"name\":\"withdrawEther\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"stateMutability\":\"payable\",\"type\":\"receive\"}]", -} - -// CartesiDAppABI is the input ABI used to generate the binding from. -// Deprecated: Use CartesiDAppMetaData.ABI instead. -var CartesiDAppABI = CartesiDAppMetaData.ABI - -// CartesiDApp is an auto generated Go binding around an Ethereum contract. -type CartesiDApp struct { - CartesiDAppCaller // Read-only binding to the contract - CartesiDAppTransactor // Write-only binding to the contract - CartesiDAppFilterer // Log filterer for contract events -} - -// CartesiDAppCaller is an auto generated read-only Go binding around an Ethereum contract. -type CartesiDAppCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// CartesiDAppTransactor is an auto generated write-only Go binding around an Ethereum contract. -type CartesiDAppTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// CartesiDAppFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type CartesiDAppFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// CartesiDAppSession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type CartesiDAppSession struct { - Contract *CartesiDApp // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// CartesiDAppCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type CartesiDAppCallerSession struct { - Contract *CartesiDAppCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// CartesiDAppTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type CartesiDAppTransactorSession struct { - Contract *CartesiDAppTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// CartesiDAppRaw is an auto generated low-level Go binding around an Ethereum contract. -type CartesiDAppRaw struct { - Contract *CartesiDApp // Generic contract binding to access the raw methods on -} - -// CartesiDAppCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type CartesiDAppCallerRaw struct { - Contract *CartesiDAppCaller // Generic read-only contract binding to access the raw methods on -} - -// CartesiDAppTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type CartesiDAppTransactorRaw struct { - Contract *CartesiDAppTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewCartesiDApp creates a new instance of CartesiDApp, bound to a specific deployed contract. 
-func NewCartesiDApp(address common.Address, backend bind.ContractBackend) (*CartesiDApp, error) { - contract, err := bindCartesiDApp(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &CartesiDApp{CartesiDAppCaller: CartesiDAppCaller{contract: contract}, CartesiDAppTransactor: CartesiDAppTransactor{contract: contract}, CartesiDAppFilterer: CartesiDAppFilterer{contract: contract}}, nil -} - -// NewCartesiDAppCaller creates a new read-only instance of CartesiDApp, bound to a specific deployed contract. -func NewCartesiDAppCaller(address common.Address, caller bind.ContractCaller) (*CartesiDAppCaller, error) { - contract, err := bindCartesiDApp(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &CartesiDAppCaller{contract: contract}, nil -} - -// NewCartesiDAppTransactor creates a new write-only instance of CartesiDApp, bound to a specific deployed contract. -func NewCartesiDAppTransactor(address common.Address, transactor bind.ContractTransactor) (*CartesiDAppTransactor, error) { - contract, err := bindCartesiDApp(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &CartesiDAppTransactor{contract: contract}, nil -} - -// NewCartesiDAppFilterer creates a new log filterer instance of CartesiDApp, bound to a specific deployed contract. -func NewCartesiDAppFilterer(address common.Address, filterer bind.ContractFilterer) (*CartesiDAppFilterer, error) { - contract, err := bindCartesiDApp(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &CartesiDAppFilterer{contract: contract}, nil -} - -// bindCartesiDApp binds a generic wrapper to an already deployed contract. -func bindCartesiDApp(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := CartesiDAppMetaData.GetAbi() - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_CartesiDApp *CartesiDAppRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _CartesiDApp.Contract.CartesiDAppCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_CartesiDApp *CartesiDAppRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _CartesiDApp.Contract.CartesiDAppTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_CartesiDApp *CartesiDAppRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _CartesiDApp.Contract.CartesiDAppTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. 
-func (_CartesiDApp *CartesiDAppCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _CartesiDApp.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_CartesiDApp *CartesiDAppTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _CartesiDApp.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_CartesiDApp *CartesiDAppTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _CartesiDApp.Contract.contract.Transact(opts, method, params...) -} - -// GetConsensus is a free data retrieval call binding the contract method 0x179e740b. -// -// Solidity: function getConsensus() view returns(address) -func (_CartesiDApp *CartesiDAppCaller) GetConsensus(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _CartesiDApp.contract.Call(opts, &out, "getConsensus") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// GetConsensus is a free data retrieval call binding the contract method 0x179e740b. -// -// Solidity: function getConsensus() view returns(address) -func (_CartesiDApp *CartesiDAppSession) GetConsensus() (common.Address, error) { - return _CartesiDApp.Contract.GetConsensus(&_CartesiDApp.CallOpts) -} - -// GetConsensus is a free data retrieval call binding the contract method 0x179e740b. -// -// Solidity: function getConsensus() view returns(address) -func (_CartesiDApp *CartesiDAppCallerSession) GetConsensus() (common.Address, error) { - return _CartesiDApp.Contract.GetConsensus(&_CartesiDApp.CallOpts) -} - -// GetTemplateHash is a free data retrieval call binding the contract method 0x61b12c66. -// -// Solidity: function getTemplateHash() view returns(bytes32) -func (_CartesiDApp *CartesiDAppCaller) GetTemplateHash(opts *bind.CallOpts) ([32]byte, error) { - var out []interface{} - err := _CartesiDApp.contract.Call(opts, &out, "getTemplateHash") - - if err != nil { - return *new([32]byte), err - } - - out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) - - return out0, err - -} - -// GetTemplateHash is a free data retrieval call binding the contract method 0x61b12c66. -// -// Solidity: function getTemplateHash() view returns(bytes32) -func (_CartesiDApp *CartesiDAppSession) GetTemplateHash() ([32]byte, error) { - return _CartesiDApp.Contract.GetTemplateHash(&_CartesiDApp.CallOpts) -} - -// GetTemplateHash is a free data retrieval call binding the contract method 0x61b12c66. -// -// Solidity: function getTemplateHash() view returns(bytes32) -func (_CartesiDApp *CartesiDAppCallerSession) GetTemplateHash() ([32]byte, error) { - return _CartesiDApp.Contract.GetTemplateHash(&_CartesiDApp.CallOpts) -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. 
-// -// Solidity: function owner() view returns(address) -func (_CartesiDApp *CartesiDAppCaller) Owner(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _CartesiDApp.contract.Call(opts, &out, "owner") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. -// -// Solidity: function owner() view returns(address) -func (_CartesiDApp *CartesiDAppSession) Owner() (common.Address, error) { - return _CartesiDApp.Contract.Owner(&_CartesiDApp.CallOpts) -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. -// -// Solidity: function owner() view returns(address) -func (_CartesiDApp *CartesiDAppCallerSession) Owner() (common.Address, error) { - return _CartesiDApp.Contract.Owner(&_CartesiDApp.CallOpts) -} - -// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. -// -// Solidity: function supportsInterface(bytes4 interfaceId) view returns(bool) -func (_CartesiDApp *CartesiDAppCaller) SupportsInterface(opts *bind.CallOpts, interfaceId [4]byte) (bool, error) { - var out []interface{} - err := _CartesiDApp.contract.Call(opts, &out, "supportsInterface", interfaceId) - - if err != nil { - return *new(bool), err - } - - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) - - return out0, err - -} - -// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. -// -// Solidity: function supportsInterface(bytes4 interfaceId) view returns(bool) -func (_CartesiDApp *CartesiDAppSession) SupportsInterface(interfaceId [4]byte) (bool, error) { - return _CartesiDApp.Contract.SupportsInterface(&_CartesiDApp.CallOpts, interfaceId) -} - -// SupportsInterface is a free data retrieval call binding the contract method 0x01ffc9a7. -// -// Solidity: function supportsInterface(bytes4 interfaceId) view returns(bool) -func (_CartesiDApp *CartesiDAppCallerSession) SupportsInterface(interfaceId [4]byte) (bool, error) { - return _CartesiDApp.Contract.SupportsInterface(&_CartesiDApp.CallOpts, interfaceId) -} - -// ValidateNotice is a free data retrieval call binding the contract method 0x96487d46. -// -// Solidity: function validateNotice(bytes _notice, ((uint64,uint64,bytes32,bytes32,bytes32,bytes32,bytes32[],bytes32[]),bytes) _proof) view returns(bool) -func (_CartesiDApp *CartesiDAppCaller) ValidateNotice(opts *bind.CallOpts, _notice []byte, _proof Proof) (bool, error) { - var out []interface{} - err := _CartesiDApp.contract.Call(opts, &out, "validateNotice", _notice, _proof) - - if err != nil { - return *new(bool), err - } - - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) - - return out0, err - -} - -// ValidateNotice is a free data retrieval call binding the contract method 0x96487d46. -// -// Solidity: function validateNotice(bytes _notice, ((uint64,uint64,bytes32,bytes32,bytes32,bytes32,bytes32[],bytes32[]),bytes) _proof) view returns(bool) -func (_CartesiDApp *CartesiDAppSession) ValidateNotice(_notice []byte, _proof Proof) (bool, error) { - return _CartesiDApp.Contract.ValidateNotice(&_CartesiDApp.CallOpts, _notice, _proof) -} - -// ValidateNotice is a free data retrieval call binding the contract method 0x96487d46. 
-// -// Solidity: function validateNotice(bytes _notice, ((uint64,uint64,bytes32,bytes32,bytes32,bytes32,bytes32[],bytes32[]),bytes) _proof) view returns(bool) -func (_CartesiDApp *CartesiDAppCallerSession) ValidateNotice(_notice []byte, _proof Proof) (bool, error) { - return _CartesiDApp.Contract.ValidateNotice(&_CartesiDApp.CallOpts, _notice, _proof) -} - -// WasVoucherExecuted is a free data retrieval call binding the contract method 0x9d9b1145. -// -// Solidity: function wasVoucherExecuted(uint256 _inputIndex, uint256 _outputIndexWithinInput) view returns(bool) -func (_CartesiDApp *CartesiDAppCaller) WasVoucherExecuted(opts *bind.CallOpts, _inputIndex *big.Int, _outputIndexWithinInput *big.Int) (bool, error) { - var out []interface{} - err := _CartesiDApp.contract.Call(opts, &out, "wasVoucherExecuted", _inputIndex, _outputIndexWithinInput) - - if err != nil { - return *new(bool), err - } - - out0 := *abi.ConvertType(out[0], new(bool)).(*bool) - - return out0, err - -} - -// WasVoucherExecuted is a free data retrieval call binding the contract method 0x9d9b1145. -// -// Solidity: function wasVoucherExecuted(uint256 _inputIndex, uint256 _outputIndexWithinInput) view returns(bool) -func (_CartesiDApp *CartesiDAppSession) WasVoucherExecuted(_inputIndex *big.Int, _outputIndexWithinInput *big.Int) (bool, error) { - return _CartesiDApp.Contract.WasVoucherExecuted(&_CartesiDApp.CallOpts, _inputIndex, _outputIndexWithinInput) -} - -// WasVoucherExecuted is a free data retrieval call binding the contract method 0x9d9b1145. -// -// Solidity: function wasVoucherExecuted(uint256 _inputIndex, uint256 _outputIndexWithinInput) view returns(bool) -func (_CartesiDApp *CartesiDAppCallerSession) WasVoucherExecuted(_inputIndex *big.Int, _outputIndexWithinInput *big.Int) (bool, error) { - return _CartesiDApp.Contract.WasVoucherExecuted(&_CartesiDApp.CallOpts, _inputIndex, _outputIndexWithinInput) -} - -// ExecuteVoucher is a paid mutator transaction binding the contract method 0x1250482f. -// -// Solidity: function executeVoucher(address _destination, bytes _payload, ((uint64,uint64,bytes32,bytes32,bytes32,bytes32,bytes32[],bytes32[]),bytes) _proof) returns(bool) -func (_CartesiDApp *CartesiDAppTransactor) ExecuteVoucher(opts *bind.TransactOpts, _destination common.Address, _payload []byte, _proof Proof) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "executeVoucher", _destination, _payload, _proof) -} - -// ExecuteVoucher is a paid mutator transaction binding the contract method 0x1250482f. -// -// Solidity: function executeVoucher(address _destination, bytes _payload, ((uint64,uint64,bytes32,bytes32,bytes32,bytes32,bytes32[],bytes32[]),bytes) _proof) returns(bool) -func (_CartesiDApp *CartesiDAppSession) ExecuteVoucher(_destination common.Address, _payload []byte, _proof Proof) (*types.Transaction, error) { - return _CartesiDApp.Contract.ExecuteVoucher(&_CartesiDApp.TransactOpts, _destination, _payload, _proof) -} - -// ExecuteVoucher is a paid mutator transaction binding the contract method 0x1250482f. 
-// -// Solidity: function executeVoucher(address _destination, bytes _payload, ((uint64,uint64,bytes32,bytes32,bytes32,bytes32,bytes32[],bytes32[]),bytes) _proof) returns(bool) -func (_CartesiDApp *CartesiDAppTransactorSession) ExecuteVoucher(_destination common.Address, _payload []byte, _proof Proof) (*types.Transaction, error) { - return _CartesiDApp.Contract.ExecuteVoucher(&_CartesiDApp.TransactOpts, _destination, _payload, _proof) -} - -// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. -// -// Solidity: function migrateToConsensus(address _newConsensus) returns() -func (_CartesiDApp *CartesiDAppTransactor) MigrateToConsensus(opts *bind.TransactOpts, _newConsensus common.Address) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "migrateToConsensus", _newConsensus) -} - -// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. -// -// Solidity: function migrateToConsensus(address _newConsensus) returns() -func (_CartesiDApp *CartesiDAppSession) MigrateToConsensus(_newConsensus common.Address) (*types.Transaction, error) { - return _CartesiDApp.Contract.MigrateToConsensus(&_CartesiDApp.TransactOpts, _newConsensus) -} - -// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. -// -// Solidity: function migrateToConsensus(address _newConsensus) returns() -func (_CartesiDApp *CartesiDAppTransactorSession) MigrateToConsensus(_newConsensus common.Address) (*types.Transaction, error) { - return _CartesiDApp.Contract.MigrateToConsensus(&_CartesiDApp.TransactOpts, _newConsensus) -} - -// OnERC1155BatchReceived is a paid mutator transaction binding the contract method 0xbc197c81. -// -// Solidity: function onERC1155BatchReceived(address , address , uint256[] , uint256[] , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppTransactor) OnERC1155BatchReceived(opts *bind.TransactOpts, arg0 common.Address, arg1 common.Address, arg2 []*big.Int, arg3 []*big.Int, arg4 []byte) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "onERC1155BatchReceived", arg0, arg1, arg2, arg3, arg4) -} - -// OnERC1155BatchReceived is a paid mutator transaction binding the contract method 0xbc197c81. -// -// Solidity: function onERC1155BatchReceived(address , address , uint256[] , uint256[] , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppSession) OnERC1155BatchReceived(arg0 common.Address, arg1 common.Address, arg2 []*big.Int, arg3 []*big.Int, arg4 []byte) (*types.Transaction, error) { - return _CartesiDApp.Contract.OnERC1155BatchReceived(&_CartesiDApp.TransactOpts, arg0, arg1, arg2, arg3, arg4) -} - -// OnERC1155BatchReceived is a paid mutator transaction binding the contract method 0xbc197c81. -// -// Solidity: function onERC1155BatchReceived(address , address , uint256[] , uint256[] , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppTransactorSession) OnERC1155BatchReceived(arg0 common.Address, arg1 common.Address, arg2 []*big.Int, arg3 []*big.Int, arg4 []byte) (*types.Transaction, error) { - return _CartesiDApp.Contract.OnERC1155BatchReceived(&_CartesiDApp.TransactOpts, arg0, arg1, arg2, arg3, arg4) -} - -// OnERC1155Received is a paid mutator transaction binding the contract method 0xf23a6e61. 
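The mutator bindings differ from the getters only in needing signed transact options. A hedged sketch of wiring ExecuteVoucher to a private key; the key, chain ID, and proof values are placeholders, and `Proof` is the struct the deleted binding already defines:

    package contracts

    import (
        "crypto/ecdsa"
        "math/big"

        "github.com/ethereum/go-ethereum/accounts/abi/bind"
        "github.com/ethereum/go-ethereum/common"
        "github.com/ethereum/go-ethereum/core/types"
    )

    // executeVoucher signs and submits an executeVoucher transaction.
    func executeVoucher(dapp *CartesiDApp, key *ecdsa.PrivateKey, chainID *big.Int,
        destination common.Address, payload []byte, proof Proof) (*types.Transaction, error) {
        auth, err := bind.NewKeyedTransactorWithChainID(key, chainID)
        if err != nil {
            return nil, err
        }
        // ExecuteVoucher is promoted from the embedded CartesiDAppTransactor.
        return dapp.ExecuteVoucher(auth, destination, payload, proof)
    }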
-// -// Solidity: function onERC1155Received(address , address , uint256 , uint256 , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppTransactor) OnERC1155Received(opts *bind.TransactOpts, arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 *big.Int, arg4 []byte) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "onERC1155Received", arg0, arg1, arg2, arg3, arg4) -} - -// OnERC1155Received is a paid mutator transaction binding the contract method 0xf23a6e61. -// -// Solidity: function onERC1155Received(address , address , uint256 , uint256 , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppSession) OnERC1155Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 *big.Int, arg4 []byte) (*types.Transaction, error) { - return _CartesiDApp.Contract.OnERC1155Received(&_CartesiDApp.TransactOpts, arg0, arg1, arg2, arg3, arg4) -} - -// OnERC1155Received is a paid mutator transaction binding the contract method 0xf23a6e61. -// -// Solidity: function onERC1155Received(address , address , uint256 , uint256 , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppTransactorSession) OnERC1155Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 *big.Int, arg4 []byte) (*types.Transaction, error) { - return _CartesiDApp.Contract.OnERC1155Received(&_CartesiDApp.TransactOpts, arg0, arg1, arg2, arg3, arg4) -} - -// OnERC721Received is a paid mutator transaction binding the contract method 0x150b7a02. -// -// Solidity: function onERC721Received(address , address , uint256 , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppTransactor) OnERC721Received(opts *bind.TransactOpts, arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 []byte) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "onERC721Received", arg0, arg1, arg2, arg3) -} - -// OnERC721Received is a paid mutator transaction binding the contract method 0x150b7a02. -// -// Solidity: function onERC721Received(address , address , uint256 , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppSession) OnERC721Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 []byte) (*types.Transaction, error) { - return _CartesiDApp.Contract.OnERC721Received(&_CartesiDApp.TransactOpts, arg0, arg1, arg2, arg3) -} - -// OnERC721Received is a paid mutator transaction binding the contract method 0x150b7a02. -// -// Solidity: function onERC721Received(address , address , uint256 , bytes ) returns(bytes4) -func (_CartesiDApp *CartesiDAppTransactorSession) OnERC721Received(arg0 common.Address, arg1 common.Address, arg2 *big.Int, arg3 []byte) (*types.Transaction, error) { - return _CartesiDApp.Contract.OnERC721Received(&_CartesiDApp.TransactOpts, arg0, arg1, arg2, arg3) -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. -// -// Solidity: function renounceOwnership() returns() -func (_CartesiDApp *CartesiDAppTransactor) RenounceOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "renounceOwnership") -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. -// -// Solidity: function renounceOwnership() returns() -func (_CartesiDApp *CartesiDAppSession) RenounceOwnership() (*types.Transaction, error) { - return _CartesiDApp.Contract.RenounceOwnership(&_CartesiDApp.TransactOpts) -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. 
-// -// Solidity: function renounceOwnership() returns() -func (_CartesiDApp *CartesiDAppTransactorSession) RenounceOwnership() (*types.Transaction, error) { - return _CartesiDApp.Contract.RenounceOwnership(&_CartesiDApp.TransactOpts) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_CartesiDApp *CartesiDAppTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "transferOwnership", newOwner) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_CartesiDApp *CartesiDAppSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { - return _CartesiDApp.Contract.TransferOwnership(&_CartesiDApp.TransactOpts, newOwner) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_CartesiDApp *CartesiDAppTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { - return _CartesiDApp.Contract.TransferOwnership(&_CartesiDApp.TransactOpts, newOwner) -} - -// WithdrawEther is a paid mutator transaction binding the contract method 0x522f6815. -// -// Solidity: function withdrawEther(address _receiver, uint256 _value) returns() -func (_CartesiDApp *CartesiDAppTransactor) WithdrawEther(opts *bind.TransactOpts, _receiver common.Address, _value *big.Int) (*types.Transaction, error) { - return _CartesiDApp.contract.Transact(opts, "withdrawEther", _receiver, _value) -} - -// WithdrawEther is a paid mutator transaction binding the contract method 0x522f6815. -// -// Solidity: function withdrawEther(address _receiver, uint256 _value) returns() -func (_CartesiDApp *CartesiDAppSession) WithdrawEther(_receiver common.Address, _value *big.Int) (*types.Transaction, error) { - return _CartesiDApp.Contract.WithdrawEther(&_CartesiDApp.TransactOpts, _receiver, _value) -} - -// WithdrawEther is a paid mutator transaction binding the contract method 0x522f6815. -// -// Solidity: function withdrawEther(address _receiver, uint256 _value) returns() -func (_CartesiDApp *CartesiDAppTransactorSession) WithdrawEther(_receiver common.Address, _value *big.Int) (*types.Transaction, error) { - return _CartesiDApp.Contract.WithdrawEther(&_CartesiDApp.TransactOpts, _receiver, _value) -} - -// Receive is a paid mutator transaction binding the contract receive function. -// -// Solidity: receive() payable returns() -func (_CartesiDApp *CartesiDAppTransactor) Receive(opts *bind.TransactOpts) (*types.Transaction, error) { - return _CartesiDApp.contract.RawTransact(opts, nil) // calldata is disallowed for receive function -} - -// Receive is a paid mutator transaction binding the contract receive function. -// -// Solidity: receive() payable returns() -func (_CartesiDApp *CartesiDAppSession) Receive() (*types.Transaction, error) { - return _CartesiDApp.Contract.Receive(&_CartesiDApp.TransactOpts) -} - -// Receive is a paid mutator transaction binding the contract receive function. 
-// -// Solidity: receive() payable returns() -func (_CartesiDApp *CartesiDAppTransactorSession) Receive() (*types.Transaction, error) { - return _CartesiDApp.Contract.Receive(&_CartesiDApp.TransactOpts) -} - -// CartesiDAppNewConsensusIterator is returned from FilterNewConsensus and is used to iterate over the raw logs and unpacked data for NewConsensus events raised by the CartesiDApp contract. -type CartesiDAppNewConsensusIterator struct { - Event *CartesiDAppNewConsensus // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *CartesiDAppNewConsensusIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(CartesiDAppNewConsensus) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(CartesiDAppNewConsensus) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *CartesiDAppNewConsensusIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *CartesiDAppNewConsensusIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// CartesiDAppNewConsensus represents a NewConsensus event raised by the CartesiDApp contract. -type CartesiDAppNewConsensus struct { - NewConsensus common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterNewConsensus is a free log retrieval operation binding the contract event 0x4991c6f37185659e276ff918a96f3e20e6c5abcd8c9aab450dc19c2f7ad35cb5. -// -// Solidity: event NewConsensus(address newConsensus) -func (_CartesiDApp *CartesiDAppFilterer) FilterNewConsensus(opts *bind.FilterOpts) (*CartesiDAppNewConsensusIterator, error) { - - logs, sub, err := _CartesiDApp.contract.FilterLogs(opts, "NewConsensus") - if err != nil { - return nil, err - } - return &CartesiDAppNewConsensusIterator{contract: _CartesiDApp.contract, event: "NewConsensus", logs: logs, sub: sub}, nil -} - -// WatchNewConsensus is a free log subscription operation binding the contract event 0x4991c6f37185659e276ff918a96f3e20e6c5abcd8c9aab450dc19c2f7ad35cb5. 
-// -// Solidity: event NewConsensus(address newConsensus) -func (_CartesiDApp *CartesiDAppFilterer) WatchNewConsensus(opts *bind.WatchOpts, sink chan<- *CartesiDAppNewConsensus) (event.Subscription, error) { - - logs, sub, err := _CartesiDApp.contract.WatchLogs(opts, "NewConsensus") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(CartesiDAppNewConsensus) - if err := _CartesiDApp.contract.UnpackLog(event, "NewConsensus", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseNewConsensus is a log parse operation binding the contract event 0x4991c6f37185659e276ff918a96f3e20e6c5abcd8c9aab450dc19c2f7ad35cb5. -// -// Solidity: event NewConsensus(address newConsensus) -func (_CartesiDApp *CartesiDAppFilterer) ParseNewConsensus(log types.Log) (*CartesiDAppNewConsensus, error) { - event := new(CartesiDAppNewConsensus) - if err := _CartesiDApp.contract.UnpackLog(event, "NewConsensus", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// CartesiDAppOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the CartesiDApp contract. -type CartesiDAppOwnershipTransferredIterator struct { - Event *CartesiDAppOwnershipTransferred // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *CartesiDAppOwnershipTransferredIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(CartesiDAppOwnershipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(CartesiDAppOwnershipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *CartesiDAppOwnershipTransferredIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. 
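The iterator above is the standard abigen pattern: Next drains already-buffered logs first, then blocks on the subscription. A sketch of the consumption loop this design anticipates:

    package contracts

    import (
        "github.com/ethereum/go-ethereum/accounts/abi/bind"
        "github.com/ethereum/go-ethereum/common"
    )

    // listNewConsensus collects NewConsensus events from block `from` onwards.
    func listNewConsensus(f *CartesiDAppFilterer, from uint64) ([]common.Address, error) {
        it, err := f.FilterNewConsensus(&bind.FilterOpts{Start: from})
        if err != nil {
            return nil, err
        }
        defer it.Close() // release the underlying subscription
        var consensuses []common.Address
        for it.Next() { // Next unpacks one log at a time into it.Event
            consensuses = append(consensuses, it.Event.NewConsensus)
        }
        return consensuses, it.Error() // surfaces retrieval/parsing failures
    }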
-func (it *CartesiDAppOwnershipTransferredIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// CartesiDAppOwnershipTransferred represents a OwnershipTransferred event raised by the CartesiDApp contract. -type CartesiDAppOwnershipTransferred struct { - PreviousOwner common.Address - NewOwner common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. -// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_CartesiDApp *CartesiDAppFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*CartesiDAppOwnershipTransferredIterator, error) { - - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - - logs, sub, err := _CartesiDApp.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) - if err != nil { - return nil, err - } - return &CartesiDAppOwnershipTransferredIterator{contract: _CartesiDApp.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil -} - -// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. -// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_CartesiDApp *CartesiDAppFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *CartesiDAppOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { - - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - - logs, sub, err := _CartesiDApp.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(CartesiDAppOwnershipTransferred) - if err := _CartesiDApp.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
-// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_CartesiDApp *CartesiDAppFilterer) ParseOwnershipTransferred(log types.Log) (*CartesiDAppOwnershipTransferred, error) { - event := new(CartesiDAppOwnershipTransferred) - if err := _CartesiDApp.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// CartesiDAppVoucherExecutedIterator is returned from FilterVoucherExecuted and is used to iterate over the raw logs and unpacked data for VoucherExecuted events raised by the CartesiDApp contract. -type CartesiDAppVoucherExecutedIterator struct { - Event *CartesiDAppVoucherExecuted // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *CartesiDAppVoucherExecutedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(CartesiDAppVoucherExecuted) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(CartesiDAppVoucherExecuted) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *CartesiDAppVoucherExecutedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *CartesiDAppVoucherExecutedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// CartesiDAppVoucherExecuted represents a VoucherExecuted event raised by the CartesiDApp contract. -type CartesiDAppVoucherExecuted struct { - VoucherId *big.Int - Raw types.Log // Blockchain specific contextual infos -} - -// FilterVoucherExecuted is a free log retrieval operation binding the contract event 0x0eb7ee080f865f1cadc4f54daf58cc3b8879e888832867d13351edcec0fbdc54. 
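Unlike NewConsensus, OwnershipTransferred has indexed parameters, so the filterer turns the address slices into topic rules; a nil or empty slice matches any value. A sketch under those semantics:

    package contracts

    import (
        "log"

        "github.com/ethereum/go-ethereum/accounts/abi/bind"
        "github.com/ethereum/go-ethereum/common"
    )

    // transfersTo lists ownership transfers whose new owner matches newOwner.
    func transfersTo(f *CartesiDAppFilterer, newOwner common.Address) error {
        // nil previousOwner rule = match any previous owner.
        it, err := f.FilterOwnershipTransferred(&bind.FilterOpts{}, nil, []common.Address{newOwner})
        if err != nil {
            return err
        }
        defer it.Close()
        for it.Next() {
            log.Printf("owner %s -> %s", it.Event.PreviousOwner, it.Event.NewOwner)
        }
        return it.Error()
    }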
-// -// Solidity: event VoucherExecuted(uint256 voucherId) -func (_CartesiDApp *CartesiDAppFilterer) FilterVoucherExecuted(opts *bind.FilterOpts) (*CartesiDAppVoucherExecutedIterator, error) { - - logs, sub, err := _CartesiDApp.contract.FilterLogs(opts, "VoucherExecuted") - if err != nil { - return nil, err - } - return &CartesiDAppVoucherExecutedIterator{contract: _CartesiDApp.contract, event: "VoucherExecuted", logs: logs, sub: sub}, nil -} - -// WatchVoucherExecuted is a free log subscription operation binding the contract event 0x0eb7ee080f865f1cadc4f54daf58cc3b8879e888832867d13351edcec0fbdc54. -// -// Solidity: event VoucherExecuted(uint256 voucherId) -func (_CartesiDApp *CartesiDAppFilterer) WatchVoucherExecuted(opts *bind.WatchOpts, sink chan<- *CartesiDAppVoucherExecuted) (event.Subscription, error) { - - logs, sub, err := _CartesiDApp.contract.WatchLogs(opts, "VoucherExecuted") - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(CartesiDAppVoucherExecuted) - if err := _CartesiDApp.contract.UnpackLog(event, "VoucherExecuted", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseVoucherExecuted is a log parse operation binding the contract event 0x0eb7ee080f865f1cadc4f54daf58cc3b8879e888832867d13351edcec0fbdc54. -// -// Solidity: event VoucherExecuted(uint256 voucherId) -func (_CartesiDApp *CartesiDAppFilterer) ParseVoucherExecuted(log types.Log) (*CartesiDAppVoucherExecuted, error) { - event := new(CartesiDAppVoucherExecuted) - if err := _CartesiDApp.contract.UnpackLog(event, "VoucherExecuted", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/pkg/contracts/cartesi_dapp_factory.go b/pkg/contracts/cartesi_dapp_factory.go deleted file mode 100644 index fb6b2be18..000000000 --- a/pkg/contracts/cartesi_dapp_factory.go +++ /dev/null @@ -1,401 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription - _ = abi.ConvertType -) - -// CartesiDAppFactoryMetaData contains all meta data concerning the CartesiDAppFactory contract. 
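The Watch variant streams live logs into a sink channel rather than iterating history, and it needs a subscription-capable backend (e.g. a websocket client). A sketch of the select loop it expects:

    package contracts

    import (
        "context"
        "log"

        "github.com/ethereum/go-ethereum/accounts/abi/bind"
    )

    // watchVouchers logs executed vouchers until ctx is cancelled.
    func watchVouchers(ctx context.Context, f *CartesiDAppFilterer) error {
        sink := make(chan *CartesiDAppVoucherExecuted)
        sub, err := f.WatchVoucherExecuted(&bind.WatchOpts{Context: ctx}, sink)
        if err != nil {
            return err
        }
        defer sub.Unsubscribe()
        for {
            select {
            case ev := <-sink:
                log.Printf("voucher %s executed", ev.VoucherId)
            case err := <-sub.Err():
                return err
            case <-ctx.Done():
                return ctx.Err()
            }
        }
    }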
-var CartesiDAppFactoryMetaData = &bind.MetaData{ - ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"contractIConsensus\",\"name\":\"consensus\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"dappOwner\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"templateHash\",\"type\":\"bytes32\"},{\"indexed\":false,\"internalType\":\"contractCartesiDApp\",\"name\":\"application\",\"type\":\"address\"}],\"name\":\"ApplicationCreated\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"_consensus\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_dappOwner\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_templateHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_salt\",\"type\":\"bytes32\"}],\"name\":\"calculateApplicationAddress\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"_consensus\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_dappOwner\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_templateHash\",\"type\":\"bytes32\"},{\"internalType\":\"bytes32\",\"name\":\"_salt\",\"type\":\"bytes32\"}],\"name\":\"newApplication\",\"outputs\":[{\"internalType\":\"contractCartesiDApp\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"contractIConsensus\",\"name\":\"_consensus\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"_dappOwner\",\"type\":\"address\"},{\"internalType\":\"bytes32\",\"name\":\"_templateHash\",\"type\":\"bytes32\"}],\"name\":\"newApplication\",\"outputs\":[{\"internalType\":\"contractCartesiDApp\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", -} - -// CartesiDAppFactoryABI is the input ABI used to generate the binding from. -// Deprecated: Use CartesiDAppFactoryMetaData.ABI instead. -var CartesiDAppFactoryABI = CartesiDAppFactoryMetaData.ABI - -// CartesiDAppFactory is an auto generated Go binding around an Ethereum contract. -type CartesiDAppFactory struct { - CartesiDAppFactoryCaller // Read-only binding to the contract - CartesiDAppFactoryTransactor // Write-only binding to the contract - CartesiDAppFactoryFilterer // Log filterer for contract events -} - -// CartesiDAppFactoryCaller is an auto generated read-only Go binding around an Ethereum contract. -type CartesiDAppFactoryCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// CartesiDAppFactoryTransactor is an auto generated write-only Go binding around an Ethereum contract. -type CartesiDAppFactoryTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// CartesiDAppFactoryFilterer is an auto generated log filtering Go binding around an Ethereum contract events. -type CartesiDAppFactoryFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// CartesiDAppFactorySession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. 
-type CartesiDAppFactorySession struct { - Contract *CartesiDAppFactory // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// CartesiDAppFactoryCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type CartesiDAppFactoryCallerSession struct { - Contract *CartesiDAppFactoryCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// CartesiDAppFactoryTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type CartesiDAppFactoryTransactorSession struct { - Contract *CartesiDAppFactoryTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// CartesiDAppFactoryRaw is an auto generated low-level Go binding around an Ethereum contract. -type CartesiDAppFactoryRaw struct { - Contract *CartesiDAppFactory // Generic contract binding to access the raw methods on -} - -// CartesiDAppFactoryCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type CartesiDAppFactoryCallerRaw struct { - Contract *CartesiDAppFactoryCaller // Generic read-only contract binding to access the raw methods on -} - -// CartesiDAppFactoryTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type CartesiDAppFactoryTransactorRaw struct { - Contract *CartesiDAppFactoryTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewCartesiDAppFactory creates a new instance of CartesiDAppFactory, bound to a specific deployed contract. -func NewCartesiDAppFactory(address common.Address, backend bind.ContractBackend) (*CartesiDAppFactory, error) { - contract, err := bindCartesiDAppFactory(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &CartesiDAppFactory{CartesiDAppFactoryCaller: CartesiDAppFactoryCaller{contract: contract}, CartesiDAppFactoryTransactor: CartesiDAppFactoryTransactor{contract: contract}, CartesiDAppFactoryFilterer: CartesiDAppFactoryFilterer{contract: contract}}, nil -} - -// NewCartesiDAppFactoryCaller creates a new read-only instance of CartesiDAppFactory, bound to a specific deployed contract. -func NewCartesiDAppFactoryCaller(address common.Address, caller bind.ContractCaller) (*CartesiDAppFactoryCaller, error) { - contract, err := bindCartesiDAppFactory(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &CartesiDAppFactoryCaller{contract: contract}, nil -} - -// NewCartesiDAppFactoryTransactor creates a new write-only instance of CartesiDAppFactory, bound to a specific deployed contract. -func NewCartesiDAppFactoryTransactor(address common.Address, transactor bind.ContractTransactor) (*CartesiDAppFactoryTransactor, error) { - contract, err := bindCartesiDAppFactory(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &CartesiDAppFactoryTransactor{contract: contract}, nil -} - -// NewCartesiDAppFactoryFilterer creates a new log filterer instance of CartesiDAppFactory, bound to a specific deployed contract. 
-func NewCartesiDAppFactoryFilterer(address common.Address, filterer bind.ContractFilterer) (*CartesiDAppFactoryFilterer, error) { - contract, err := bindCartesiDAppFactory(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &CartesiDAppFactoryFilterer{contract: contract}, nil -} - -// bindCartesiDAppFactory binds a generic wrapper to an already deployed contract. -func bindCartesiDAppFactory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := CartesiDAppFactoryMetaData.GetAbi() - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_CartesiDAppFactory *CartesiDAppFactoryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _CartesiDAppFactory.Contract.CartesiDAppFactoryCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_CartesiDAppFactory *CartesiDAppFactoryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.CartesiDAppFactoryTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_CartesiDAppFactory *CartesiDAppFactoryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.CartesiDAppFactoryTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_CartesiDAppFactory *CartesiDAppFactoryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _CartesiDAppFactory.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_CartesiDAppFactory *CartesiDAppFactoryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_CartesiDAppFactory *CartesiDAppFactoryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.contract.Transact(opts, method, params...) -} - -// CalculateApplicationAddress is a free data retrieval call binding the contract method 0xbd4f1219. 
-// -// Solidity: function calculateApplicationAddress(address _consensus, address _dappOwner, bytes32 _templateHash, bytes32 _salt) view returns(address) -func (_CartesiDAppFactory *CartesiDAppFactoryCaller) CalculateApplicationAddress(opts *bind.CallOpts, _consensus common.Address, _dappOwner common.Address, _templateHash [32]byte, _salt [32]byte) (common.Address, error) { - var out []interface{} - err := _CartesiDAppFactory.contract.Call(opts, &out, "calculateApplicationAddress", _consensus, _dappOwner, _templateHash, _salt) - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// CalculateApplicationAddress is a free data retrieval call binding the contract method 0xbd4f1219. -// -// Solidity: function calculateApplicationAddress(address _consensus, address _dappOwner, bytes32 _templateHash, bytes32 _salt) view returns(address) -func (_CartesiDAppFactory *CartesiDAppFactorySession) CalculateApplicationAddress(_consensus common.Address, _dappOwner common.Address, _templateHash [32]byte, _salt [32]byte) (common.Address, error) { - return _CartesiDAppFactory.Contract.CalculateApplicationAddress(&_CartesiDAppFactory.CallOpts, _consensus, _dappOwner, _templateHash, _salt) -} - -// CalculateApplicationAddress is a free data retrieval call binding the contract method 0xbd4f1219. -// -// Solidity: function calculateApplicationAddress(address _consensus, address _dappOwner, bytes32 _templateHash, bytes32 _salt) view returns(address) -func (_CartesiDAppFactory *CartesiDAppFactoryCallerSession) CalculateApplicationAddress(_consensus common.Address, _dappOwner common.Address, _templateHash [32]byte, _salt [32]byte) (common.Address, error) { - return _CartesiDAppFactory.Contract.CalculateApplicationAddress(&_CartesiDAppFactory.CallOpts, _consensus, _dappOwner, _templateHash, _salt) -} - -// NewApplication is a paid mutator transaction binding the contract method 0x0e1a07f5. -// -// Solidity: function newApplication(address _consensus, address _dappOwner, bytes32 _templateHash, bytes32 _salt) returns(address) -func (_CartesiDAppFactory *CartesiDAppFactoryTransactor) NewApplication(opts *bind.TransactOpts, _consensus common.Address, _dappOwner common.Address, _templateHash [32]byte, _salt [32]byte) (*types.Transaction, error) { - return _CartesiDAppFactory.contract.Transact(opts, "newApplication", _consensus, _dappOwner, _templateHash, _salt) -} - -// NewApplication is a paid mutator transaction binding the contract method 0x0e1a07f5. -// -// Solidity: function newApplication(address _consensus, address _dappOwner, bytes32 _templateHash, bytes32 _salt) returns(address) -func (_CartesiDAppFactory *CartesiDAppFactorySession) NewApplication(_consensus common.Address, _dappOwner common.Address, _templateHash [32]byte, _salt [32]byte) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.NewApplication(&_CartesiDAppFactory.TransactOpts, _consensus, _dappOwner, _templateHash, _salt) -} - -// NewApplication is a paid mutator transaction binding the contract method 0x0e1a07f5. 
-// -// Solidity: function newApplication(address _consensus, address _dappOwner, bytes32 _templateHash, bytes32 _salt) returns(address) -func (_CartesiDAppFactory *CartesiDAppFactoryTransactorSession) NewApplication(_consensus common.Address, _dappOwner common.Address, _templateHash [32]byte, _salt [32]byte) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.NewApplication(&_CartesiDAppFactory.TransactOpts, _consensus, _dappOwner, _templateHash, _salt) -} - -// NewApplication0 is a paid mutator transaction binding the contract method 0x3648bfb5. -// -// Solidity: function newApplication(address _consensus, address _dappOwner, bytes32 _templateHash) returns(address) -func (_CartesiDAppFactory *CartesiDAppFactoryTransactor) NewApplication0(opts *bind.TransactOpts, _consensus common.Address, _dappOwner common.Address, _templateHash [32]byte) (*types.Transaction, error) { - return _CartesiDAppFactory.contract.Transact(opts, "newApplication0", _consensus, _dappOwner, _templateHash) -} - -// NewApplication0 is a paid mutator transaction binding the contract method 0x3648bfb5. -// -// Solidity: function newApplication(address _consensus, address _dappOwner, bytes32 _templateHash) returns(address) -func (_CartesiDAppFactory *CartesiDAppFactorySession) NewApplication0(_consensus common.Address, _dappOwner common.Address, _templateHash [32]byte) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.NewApplication0(&_CartesiDAppFactory.TransactOpts, _consensus, _dappOwner, _templateHash) -} - -// NewApplication0 is a paid mutator transaction binding the contract method 0x3648bfb5. -// -// Solidity: function newApplication(address _consensus, address _dappOwner, bytes32 _templateHash) returns(address) -func (_CartesiDAppFactory *CartesiDAppFactoryTransactorSession) NewApplication0(_consensus common.Address, _dappOwner common.Address, _templateHash [32]byte) (*types.Transaction, error) { - return _CartesiDAppFactory.Contract.NewApplication0(&_CartesiDAppFactory.TransactOpts, _consensus, _dappOwner, _templateHash) -} - -// CartesiDAppFactoryApplicationCreatedIterator is returned from FilterApplicationCreated and is used to iterate over the raw logs and unpacked data for ApplicationCreated events raised by the CartesiDAppFactory contract. -type CartesiDAppFactoryApplicationCreatedIterator struct { - Event *CartesiDAppFactoryApplicationCreated // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
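Because the salted newApplication overload deploys deterministically (apparently via CREATE2), calculateApplicationAddress lets callers know the application address before the deployment transaction is even mined. A sketch of that pairing:

    package contracts

    import (
        "github.com/ethereum/go-ethereum/accounts/abi/bind"
        "github.com/ethereum/go-ethereum/common"
    )

    // deployApp predicts the application address, then deploys to it.
    func deployApp(f *CartesiDAppFactory, auth *bind.TransactOpts,
        consensus, owner common.Address, templateHash, salt [32]byte) (common.Address, error) {
        predicted, err := f.CalculateApplicationAddress(&bind.CallOpts{}, consensus, owner, templateHash, salt)
        if err != nil {
            return common.Address{}, err
        }
        // The salted newApplication should land at the predicted address.
        if _, err := f.NewApplication(auth, consensus, owner, templateHash, salt); err != nil {
            return common.Address{}, err
        }
        return predicted, nil
    }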
-func (it *CartesiDAppFactoryApplicationCreatedIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(CartesiDAppFactoryApplicationCreated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(CartesiDAppFactoryApplicationCreated) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *CartesiDAppFactoryApplicationCreatedIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *CartesiDAppFactoryApplicationCreatedIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// CartesiDAppFactoryApplicationCreated represents a ApplicationCreated event raised by the CartesiDAppFactory contract. -type CartesiDAppFactoryApplicationCreated struct { - Consensus common.Address - DappOwner common.Address - TemplateHash [32]byte - Application common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterApplicationCreated is a free log retrieval operation binding the contract event 0xe73165c2d277daf8713fd08b40845cb6bb7a20b2b543f3d35324a475660fcebd. -// -// Solidity: event ApplicationCreated(address indexed consensus, address dappOwner, bytes32 templateHash, address application) -func (_CartesiDAppFactory *CartesiDAppFactoryFilterer) FilterApplicationCreated(opts *bind.FilterOpts, consensus []common.Address) (*CartesiDAppFactoryApplicationCreatedIterator, error) { - - var consensusRule []interface{} - for _, consensusItem := range consensus { - consensusRule = append(consensusRule, consensusItem) - } - - logs, sub, err := _CartesiDAppFactory.contract.FilterLogs(opts, "ApplicationCreated", consensusRule) - if err != nil { - return nil, err - } - return &CartesiDAppFactoryApplicationCreatedIterator{contract: _CartesiDAppFactory.contract, event: "ApplicationCreated", logs: logs, sub: sub}, nil -} - -// WatchApplicationCreated is a free log subscription operation binding the contract event 0xe73165c2d277daf8713fd08b40845cb6bb7a20b2b543f3d35324a475660fcebd. 
-// -// Solidity: event ApplicationCreated(address indexed consensus, address dappOwner, bytes32 templateHash, address application) -func (_CartesiDAppFactory *CartesiDAppFactoryFilterer) WatchApplicationCreated(opts *bind.WatchOpts, sink chan<- *CartesiDAppFactoryApplicationCreated, consensus []common.Address) (event.Subscription, error) { - - var consensusRule []interface{} - for _, consensusItem := range consensus { - consensusRule = append(consensusRule, consensusItem) - } - - logs, sub, err := _CartesiDAppFactory.contract.WatchLogs(opts, "ApplicationCreated", consensusRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(CartesiDAppFactoryApplicationCreated) - if err := _CartesiDAppFactory.contract.UnpackLog(event, "ApplicationCreated", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseApplicationCreated is a log parse operation binding the contract event 0xe73165c2d277daf8713fd08b40845cb6bb7a20b2b543f3d35324a475660fcebd. -// -// Solidity: event ApplicationCreated(address indexed consensus, address dappOwner, bytes32 templateHash, address application) -func (_CartesiDAppFactory *CartesiDAppFactoryFilterer) ParseApplicationCreated(log types.Log) (*CartesiDAppFactoryApplicationCreated, error) { - event := new(CartesiDAppFactoryApplicationCreated) - if err := _CartesiDAppFactory.contract.UnpackLog(event, "ApplicationCreated", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/pkg/contracts/generate/main.go b/pkg/contracts/generate/main.go index a0dba4861..c210a8fcc 100644 --- a/pkg/contracts/generate/main.go +++ b/pkg/contracts/generate/main.go @@ -12,49 +12,45 @@ import ( "archive/tar" "compress/gzip" "encoding/json" + "errors" "io" + "io/fs" "log" "net/http" "os" + "strings" "github.com/ethereum/go-ethereum/accounts/abi/bind" ) -const rollupsContractsUrl = "https://registry.npmjs.org/@cartesi/rollups/-/rollups-1.2.0.tgz" +const rollupsContractsUrl = "https://registry.npmjs.org/@cartesi/rollups/-/rollups-2.0.0-rc.2.tgz" const baseContractsPath = "package/export/artifacts/contracts/" -const bindingPkg = "contracts" type contractBinding struct { jsonPath string typeName string - outFile string } var bindings = []contractBinding{ { - jsonPath: baseContractsPath + "inputs/InputBox.sol/InputBox.json", - typeName: "InputBox", - outFile: "input_box.go", + jsonPath: baseContractsPath + "consensus/IConsensus.sol/IConsensus.json", + typeName: "IConsensus", }, { - jsonPath: baseContractsPath + "dapp/CartesiDAppFactory.sol/CartesiDAppFactory.json", - typeName: "CartesiDAppFactory", - outFile: "cartesi_dapp_factory.go", + jsonPath: baseContractsPath + "dapp/Application.sol/Application.json", + typeName: "Application", }, { - jsonPath: baseContractsPath + "dapp/CartesiDApp.sol/CartesiDApp.json", - typeName: "CartesiDApp", - outFile: "cartesi_dapp.go", + jsonPath: baseContractsPath + "inputs/InputBox.sol/InputBox.json", + typeName: "InputBox", }, { - jsonPath: baseContractsPath + "consensus/authority/Authority.sol/Authority.json", - typeName: "Authority", - outFile: "authority.go", + jsonPath: baseContractsPath + "common/Inputs.sol/Inputs.json", + 
typeName: "Inputs", }, { - jsonPath: baseContractsPath + "history/History.sol/History.json", - typeName: "History", - outFile: "history.go", + jsonPath: baseContractsPath + "common/Outputs.sol/Outputs.json", + typeName: "Outputs", }, } @@ -136,9 +132,16 @@ func getAbi(rawJson []byte) []byte { return contents.Abi } +// Check whether file exists. +func fileExists(filePath string) bool { + _, err := os.Stat(filePath) + return !errors.Is(err, fs.ErrNotExist) +} + // Generate the Go bindings for the contracts. func generateBinding(b contractBinding, content []byte) { var ( + pkg = strings.ToLower(b.typeName) sigs []map[string]string abis = []string{string(getAbi(content))} bins = []string{""} @@ -146,10 +149,22 @@ func generateBinding(b contractBinding, content []byte) { libs = make(map[string]string) aliases = make(map[string]string) ) - code, err := bind.Bind(types, abis, bins, sigs, bindingPkg, bind.LangGo, libs, aliases) + code, err := bind.Bind(types, abis, bins, sigs, pkg, bind.LangGo, libs, aliases) checkErr("generate binding", err) + + if fileExists(pkg) { + err := os.RemoveAll(pkg) + checkErr("removing dir", err) + } + + const dirMode = 0700 + err = os.Mkdir(pkg, dirMode) + checkErr("creating dir", err) + const fileMode = 0600 - err = os.WriteFile(b.outFile, []byte(code), fileMode) + filePath := pkg + "/" + pkg + ".go" + err = os.WriteFile(filePath, []byte(code), fileMode) checkErr("write binding file", err) - log.Print("generated binding ", b.outFile) + + log.Print("generated binding for ", filePath) } diff --git a/pkg/contracts/history.go b/pkg/contracts/history.go deleted file mode 100644 index 47489c82e..000000000 --- a/pkg/contracts/history.go +++ /dev/null @@ -1,634 +0,0 @@ -// Code generated - DO NOT EDIT. -// This file is a generated binding and any manual changes will be lost. - -package contracts - -import ( - "errors" - "math/big" - "strings" - - ethereum "github.com/ethereum/go-ethereum" - "github.com/ethereum/go-ethereum/accounts/abi" - "github.com/ethereum/go-ethereum/accounts/abi/bind" - "github.com/ethereum/go-ethereum/common" - "github.com/ethereum/go-ethereum/core/types" - "github.com/ethereum/go-ethereum/event" -) - -// Reference imports to suppress errors if they are not otherwise used. -var ( - _ = errors.New - _ = big.NewInt - _ = strings.NewReader - _ = ethereum.NotFound - _ = bind.Bind - _ = common.Big1 - _ = types.BloomLookup - _ = event.NewSubscription - _ = abi.ConvertType -) - -// HistoryClaim is an auto generated low-level Go binding around an user-defined struct. -type HistoryClaim struct { - EpochHash [32]byte - FirstIndex *big.Int - LastIndex *big.Int -} - -// HistoryMetaData contains all meta data concerning the History contract. 
-var HistoryMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"inputs\":[],\"name\":\"InvalidClaimIndex\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"InvalidInputIndices\",\"type\":\"error\"},{\"inputs\":[],\"name\":\"UnclaimedInputs\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"dapp\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"bytes32\",\"name\":\"epochHash\",\"type\":\"bytes32\"},{\"internalType\":\"uint128\",\"name\":\"firstIndex\",\"type\":\"uint128\"},{\"internalType\":\"uint128\",\"name\":\"lastIndex\",\"type\":\"uint128\"}],\"indexed\":false,\"internalType\":\"structHistory.Claim\",\"name\":\"claim\",\"type\":\"tuple\"}],\"name\":\"NewClaimToHistory\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"previousOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_dapp\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_proofContext\",\"type\":\"bytes\"}],\"name\":\"getClaim\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_consensus\",\"type\":\"address\"}],\"name\":\"migrateToConsensus\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_claimData\",\"type\":\"bytes\"}],\"name\":\"submitClaim\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", -} - -// HistoryABI is the input ABI used to generate the binding from. -// Deprecated: Use HistoryMetaData.ABI instead. -var HistoryABI = HistoryMetaData.ABI - -// History is an auto generated Go binding around an Ethereum contract. -type History struct { - HistoryCaller // Read-only binding to the contract - HistoryTransactor // Write-only binding to the contract - HistoryFilterer // Log filterer for contract events -} - -// HistoryCaller is an auto generated read-only Go binding around an Ethereum contract. -type HistoryCaller struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// HistoryTransactor is an auto generated write-only Go binding around an Ethereum contract. -type HistoryTransactor struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// HistoryFilterer is an auto generated log filtering Go binding around an Ethereum contract events. 
-type HistoryFilterer struct { - contract *bind.BoundContract // Generic contract wrapper for the low level calls -} - -// HistorySession is an auto generated Go binding around an Ethereum contract, -// with pre-set call and transact options. -type HistorySession struct { - Contract *History // Generic contract binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// HistoryCallerSession is an auto generated read-only Go binding around an Ethereum contract, -// with pre-set call options. -type HistoryCallerSession struct { - Contract *HistoryCaller // Generic contract caller binding to set the session for - CallOpts bind.CallOpts // Call options to use throughout this session -} - -// HistoryTransactorSession is an auto generated write-only Go binding around an Ethereum contract, -// with pre-set transact options. -type HistoryTransactorSession struct { - Contract *HistoryTransactor // Generic contract transactor binding to set the session for - TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session -} - -// HistoryRaw is an auto generated low-level Go binding around an Ethereum contract. -type HistoryRaw struct { - Contract *History // Generic contract binding to access the raw methods on -} - -// HistoryCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. -type HistoryCallerRaw struct { - Contract *HistoryCaller // Generic read-only contract binding to access the raw methods on -} - -// HistoryTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. -type HistoryTransactorRaw struct { - Contract *HistoryTransactor // Generic write-only contract binding to access the raw methods on -} - -// NewHistory creates a new instance of History, bound to a specific deployed contract. -func NewHistory(address common.Address, backend bind.ContractBackend) (*History, error) { - contract, err := bindHistory(address, backend, backend, backend) - if err != nil { - return nil, err - } - return &History{HistoryCaller: HistoryCaller{contract: contract}, HistoryTransactor: HistoryTransactor{contract: contract}, HistoryFilterer: HistoryFilterer{contract: contract}}, nil -} - -// NewHistoryCaller creates a new read-only instance of History, bound to a specific deployed contract. -func NewHistoryCaller(address common.Address, caller bind.ContractCaller) (*HistoryCaller, error) { - contract, err := bindHistory(address, caller, nil, nil) - if err != nil { - return nil, err - } - return &HistoryCaller{contract: contract}, nil -} - -// NewHistoryTransactor creates a new write-only instance of History, bound to a specific deployed contract. -func NewHistoryTransactor(address common.Address, transactor bind.ContractTransactor) (*HistoryTransactor, error) { - contract, err := bindHistory(address, nil, transactor, nil) - if err != nil { - return nil, err - } - return &HistoryTransactor{contract: contract}, nil -} - -// NewHistoryFilterer creates a new log filterer instance of History, bound to a specific deployed contract. -func NewHistoryFilterer(address common.Address, filterer bind.ContractFilterer) (*HistoryFilterer, error) { - contract, err := bindHistory(address, nil, nil, filterer) - if err != nil { - return nil, err - } - return &HistoryFilterer{contract: contract}, nil -} - -// bindHistory binds a generic wrapper to an already deployed contract. 
-func bindHistory(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { - parsed, err := HistoryMetaData.GetAbi() - if err != nil { - return nil, err - } - return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_History *HistoryRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _History.Contract.HistoryCaller.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_History *HistoryRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _History.Contract.HistoryTransactor.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_History *HistoryRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _History.Contract.HistoryTransactor.contract.Transact(opts, method, params...) -} - -// Call invokes the (constant) contract method with params as input values and -// sets the output to result. The result type might be a single field for simple -// returns, a slice of interfaces for anonymous returns and a struct for named -// returns. -func (_History *HistoryCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { - return _History.Contract.contract.Call(opts, result, method, params...) -} - -// Transfer initiates a plain transaction to move funds to the contract, calling -// its default method if one is available. -func (_History *HistoryTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { - return _History.Contract.contract.Transfer(opts) -} - -// Transact invokes the (paid) contract method with params as input values. -func (_History *HistoryTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { - return _History.Contract.contract.Transact(opts, method, params...) -} - -// GetClaim is a free data retrieval call binding the contract method 0xd79a8240. -// -// Solidity: function getClaim(address _dapp, bytes _proofContext) view returns(bytes32, uint256, uint256) -func (_History *HistoryCaller) GetClaim(opts *bind.CallOpts, _dapp common.Address, _proofContext []byte) ([32]byte, *big.Int, *big.Int, error) { - var out []interface{} - err := _History.contract.Call(opts, &out, "getClaim", _dapp, _proofContext) - - if err != nil { - return *new([32]byte), *new(*big.Int), *new(*big.Int), err - } - - out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) - out1 := *abi.ConvertType(out[1], new(*big.Int)).(**big.Int) - out2 := *abi.ConvertType(out[2], new(*big.Int)).(**big.Int) - - return out0, out1, out2, err - -} - -// GetClaim is a free data retrieval call binding the contract method 0xd79a8240. 
-// -// Solidity: function getClaim(address _dapp, bytes _proofContext) view returns(bytes32, uint256, uint256) -func (_History *HistorySession) GetClaim(_dapp common.Address, _proofContext []byte) ([32]byte, *big.Int, *big.Int, error) { - return _History.Contract.GetClaim(&_History.CallOpts, _dapp, _proofContext) -} - -// GetClaim is a free data retrieval call binding the contract method 0xd79a8240. -// -// Solidity: function getClaim(address _dapp, bytes _proofContext) view returns(bytes32, uint256, uint256) -func (_History *HistoryCallerSession) GetClaim(_dapp common.Address, _proofContext []byte) ([32]byte, *big.Int, *big.Int, error) { - return _History.Contract.GetClaim(&_History.CallOpts, _dapp, _proofContext) -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. -// -// Solidity: function owner() view returns(address) -func (_History *HistoryCaller) Owner(opts *bind.CallOpts) (common.Address, error) { - var out []interface{} - err := _History.contract.Call(opts, &out, "owner") - - if err != nil { - return *new(common.Address), err - } - - out0 := *abi.ConvertType(out[0], new(common.Address)).(*common.Address) - - return out0, err - -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. -// -// Solidity: function owner() view returns(address) -func (_History *HistorySession) Owner() (common.Address, error) { - return _History.Contract.Owner(&_History.CallOpts) -} - -// Owner is a free data retrieval call binding the contract method 0x8da5cb5b. -// -// Solidity: function owner() view returns(address) -func (_History *HistoryCallerSession) Owner() (common.Address, error) { - return _History.Contract.Owner(&_History.CallOpts) -} - -// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. -// -// Solidity: function migrateToConsensus(address _consensus) returns() -func (_History *HistoryTransactor) MigrateToConsensus(opts *bind.TransactOpts, _consensus common.Address) (*types.Transaction, error) { - return _History.contract.Transact(opts, "migrateToConsensus", _consensus) -} - -// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. -// -// Solidity: function migrateToConsensus(address _consensus) returns() -func (_History *HistorySession) MigrateToConsensus(_consensus common.Address) (*types.Transaction, error) { - return _History.Contract.MigrateToConsensus(&_History.TransactOpts, _consensus) -} - -// MigrateToConsensus is a paid mutator transaction binding the contract method 0xfc411683. -// -// Solidity: function migrateToConsensus(address _consensus) returns() -func (_History *HistoryTransactorSession) MigrateToConsensus(_consensus common.Address) (*types.Transaction, error) { - return _History.Contract.MigrateToConsensus(&_History.TransactOpts, _consensus) -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. -// -// Solidity: function renounceOwnership() returns() -func (_History *HistoryTransactor) RenounceOwnership(opts *bind.TransactOpts) (*types.Transaction, error) { - return _History.contract.Transact(opts, "renounceOwnership") -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. 
-// -// Solidity: function renounceOwnership() returns() -func (_History *HistorySession) RenounceOwnership() (*types.Transaction, error) { - return _History.Contract.RenounceOwnership(&_History.TransactOpts) -} - -// RenounceOwnership is a paid mutator transaction binding the contract method 0x715018a6. -// -// Solidity: function renounceOwnership() returns() -func (_History *HistoryTransactorSession) RenounceOwnership() (*types.Transaction, error) { - return _History.Contract.RenounceOwnership(&_History.TransactOpts) -} - -// SubmitClaim is a paid mutator transaction binding the contract method 0xddfdfbb0. -// -// Solidity: function submitClaim(bytes _claimData) returns() -func (_History *HistoryTransactor) SubmitClaim(opts *bind.TransactOpts, _claimData []byte) (*types.Transaction, error) { - return _History.contract.Transact(opts, "submitClaim", _claimData) -} - -// SubmitClaim is a paid mutator transaction binding the contract method 0xddfdfbb0. -// -// Solidity: function submitClaim(bytes _claimData) returns() -func (_History *HistorySession) SubmitClaim(_claimData []byte) (*types.Transaction, error) { - return _History.Contract.SubmitClaim(&_History.TransactOpts, _claimData) -} - -// SubmitClaim is a paid mutator transaction binding the contract method 0xddfdfbb0. -// -// Solidity: function submitClaim(bytes _claimData) returns() -func (_History *HistoryTransactorSession) SubmitClaim(_claimData []byte) (*types.Transaction, error) { - return _History.Contract.SubmitClaim(&_History.TransactOpts, _claimData) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_History *HistoryTransactor) TransferOwnership(opts *bind.TransactOpts, newOwner common.Address) (*types.Transaction, error) { - return _History.contract.Transact(opts, "transferOwnership", newOwner) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_History *HistorySession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { - return _History.Contract.TransferOwnership(&_History.TransactOpts, newOwner) -} - -// TransferOwnership is a paid mutator transaction binding the contract method 0xf2fde38b. -// -// Solidity: function transferOwnership(address newOwner) returns() -func (_History *HistoryTransactorSession) TransferOwnership(newOwner common.Address) (*types.Transaction, error) { - return _History.Contract.TransferOwnership(&_History.TransactOpts, newOwner) -} - -// HistoryNewClaimToHistoryIterator is returned from FilterNewClaimToHistory and is used to iterate over the raw logs and unpacked data for NewClaimToHistory events raised by the History contract. -type HistoryNewClaimToHistoryIterator struct { - Event *HistoryNewClaimToHistory // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. 
In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. -func (it *HistoryNewClaimToHistoryIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(HistoryNewClaimToHistory) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(HistoryNewClaimToHistory) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *HistoryNewClaimToHistoryIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *HistoryNewClaimToHistoryIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// HistoryNewClaimToHistory represents a NewClaimToHistory event raised by the History contract. -type HistoryNewClaimToHistory struct { - Dapp common.Address - Claim HistoryClaim - Raw types.Log // Blockchain specific contextual infos -} - -// FilterNewClaimToHistory is a free log retrieval operation binding the contract event 0xb71880d7a0c514d48c0296b2721b0a4f9641a45117960f2ca86b5b7873c4ab2f. -// -// Solidity: event NewClaimToHistory(address indexed dapp, (bytes32,uint128,uint128) claim) -func (_History *HistoryFilterer) FilterNewClaimToHistory(opts *bind.FilterOpts, dapp []common.Address) (*HistoryNewClaimToHistoryIterator, error) { - - var dappRule []interface{} - for _, dappItem := range dapp { - dappRule = append(dappRule, dappItem) - } - - logs, sub, err := _History.contract.FilterLogs(opts, "NewClaimToHistory", dappRule) - if err != nil { - return nil, err - } - return &HistoryNewClaimToHistoryIterator{contract: _History.contract, event: "NewClaimToHistory", logs: logs, sub: sub}, nil -} - -// WatchNewClaimToHistory is a free log subscription operation binding the contract event 0xb71880d7a0c514d48c0296b2721b0a4f9641a45117960f2ca86b5b7873c4ab2f. 
-// -// Solidity: event NewClaimToHistory(address indexed dapp, (bytes32,uint128,uint128) claim) -func (_History *HistoryFilterer) WatchNewClaimToHistory(opts *bind.WatchOpts, sink chan<- *HistoryNewClaimToHistory, dapp []common.Address) (event.Subscription, error) { - - var dappRule []interface{} - for _, dappItem := range dapp { - dappRule = append(dappRule, dappItem) - } - - logs, sub, err := _History.contract.WatchLogs(opts, "NewClaimToHistory", dappRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(HistoryNewClaimToHistory) - if err := _History.contract.UnpackLog(event, "NewClaimToHistory", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseNewClaimToHistory is a log parse operation binding the contract event 0xb71880d7a0c514d48c0296b2721b0a4f9641a45117960f2ca86b5b7873c4ab2f. -// -// Solidity: event NewClaimToHistory(address indexed dapp, (bytes32,uint128,uint128) claim) -func (_History *HistoryFilterer) ParseNewClaimToHistory(log types.Log) (*HistoryNewClaimToHistory, error) { - event := new(HistoryNewClaimToHistory) - if err := _History.contract.UnpackLog(event, "NewClaimToHistory", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} - -// HistoryOwnershipTransferredIterator is returned from FilterOwnershipTransferred and is used to iterate over the raw logs and unpacked data for OwnershipTransferred events raised by the History contract. -type HistoryOwnershipTransferredIterator struct { - Event *HistoryOwnershipTransferred // Event containing the contract specifics and raw log - - contract *bind.BoundContract // Generic contract to use for unpacking event data - event string // Event name to use for unpacking event data - - logs chan types.Log // Log channel receiving the found contract events - sub ethereum.Subscription // Subscription for errors, completion and termination - done bool // Whether the subscription completed delivering logs - fail error // Occurred error to stop iteration -} - -// Next advances the iterator to the subsequent event, returning whether there -// are any more events found. In case of a retrieval or parsing error, false is -// returned and Error() can be queried for the exact failure. 
-func (it *HistoryOwnershipTransferredIterator) Next() bool { - // If the iterator failed, stop iterating - if it.fail != nil { - return false - } - // If the iterator completed, deliver directly whatever's available - if it.done { - select { - case log := <-it.logs: - it.Event = new(HistoryOwnershipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - default: - return false - } - } - // Iterator still in progress, wait for either a data or an error event - select { - case log := <-it.logs: - it.Event = new(HistoryOwnershipTransferred) - if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { - it.fail = err - return false - } - it.Event.Raw = log - return true - - case err := <-it.sub.Err(): - it.done = true - it.fail = err - return it.Next() - } -} - -// Error returns any retrieval or parsing error occurred during filtering. -func (it *HistoryOwnershipTransferredIterator) Error() error { - return it.fail -} - -// Close terminates the iteration process, releasing any pending underlying -// resources. -func (it *HistoryOwnershipTransferredIterator) Close() error { - it.sub.Unsubscribe() - return nil -} - -// HistoryOwnershipTransferred represents a OwnershipTransferred event raised by the History contract. -type HistoryOwnershipTransferred struct { - PreviousOwner common.Address - NewOwner common.Address - Raw types.Log // Blockchain specific contextual infos -} - -// FilterOwnershipTransferred is a free log retrieval operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. -// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_History *HistoryFilterer) FilterOwnershipTransferred(opts *bind.FilterOpts, previousOwner []common.Address, newOwner []common.Address) (*HistoryOwnershipTransferredIterator, error) { - - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - - logs, sub, err := _History.contract.FilterLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) - if err != nil { - return nil, err - } - return &HistoryOwnershipTransferredIterator{contract: _History.contract, event: "OwnershipTransferred", logs: logs, sub: sub}, nil -} - -// WatchOwnershipTransferred is a free log subscription operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. 
-// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_History *HistoryFilterer) WatchOwnershipTransferred(opts *bind.WatchOpts, sink chan<- *HistoryOwnershipTransferred, previousOwner []common.Address, newOwner []common.Address) (event.Subscription, error) { - - var previousOwnerRule []interface{} - for _, previousOwnerItem := range previousOwner { - previousOwnerRule = append(previousOwnerRule, previousOwnerItem) - } - var newOwnerRule []interface{} - for _, newOwnerItem := range newOwner { - newOwnerRule = append(newOwnerRule, newOwnerItem) - } - - logs, sub, err := _History.contract.WatchLogs(opts, "OwnershipTransferred", previousOwnerRule, newOwnerRule) - if err != nil { - return nil, err - } - return event.NewSubscription(func(quit <-chan struct{}) error { - defer sub.Unsubscribe() - for { - select { - case log := <-logs: - // New log arrived, parse the event and forward to the user - event := new(HistoryOwnershipTransferred) - if err := _History.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return err - } - event.Raw = log - - select { - case sink <- event: - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - case err := <-sub.Err(): - return err - case <-quit: - return nil - } - } - }), nil -} - -// ParseOwnershipTransferred is a log parse operation binding the contract event 0x8be0079c531659141344cd1fd0a4f28419497f9722a3daafe3b4186f6b6457e0. -// -// Solidity: event OwnershipTransferred(address indexed previousOwner, address indexed newOwner) -func (_History *HistoryFilterer) ParseOwnershipTransferred(log types.Log) (*HistoryOwnershipTransferred, error) { - event := new(HistoryOwnershipTransferred) - if err := _History.contract.UnpackLog(event, "OwnershipTransferred", log); err != nil { - return nil, err - } - event.Raw = log - return event, nil -} diff --git a/pkg/contracts/iconsensus/iconsensus.go b/pkg/contracts/iconsensus/iconsensus.go new file mode 100644 index 000000000..5919fff1e --- /dev/null +++ b/pkg/contracts/iconsensus/iconsensus.go @@ -0,0 +1,540 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package iconsensus + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// InputRange is an auto generated low-level Go binding around an user-defined struct. +type InputRange struct { + FirstIndex uint64 + LastIndex uint64 +} + +// IConsensusMetaData contains all meta data concerning the IConsensus contract. 
+var IConsensusMetaData = &bind.MetaData{ + ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"firstIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"lastIndex\",\"type\":\"uint64\"}],\"indexed\":false,\"internalType\":\"structInputRange\",\"name\":\"inputRange\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"epochHash\",\"type\":\"bytes32\"}],\"name\":\"ClaimAcceptance\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"submitter\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"firstIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"lastIndex\",\"type\":\"uint64\"}],\"indexed\":false,\"internalType\":\"structInputRange\",\"name\":\"inputRange\",\"type\":\"tuple\"},{\"indexed\":false,\"internalType\":\"bytes32\",\"name\":\"epochHash\",\"type\":\"bytes32\"}],\"name\":\"ClaimSubmission\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"firstIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"lastIndex\",\"type\":\"uint64\"}],\"internalType\":\"structInputRange\",\"name\":\"inputRange\",\"type\":\"tuple\"}],\"name\":\"getEpochHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"epochHash\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"components\":[{\"internalType\":\"uint64\",\"name\":\"firstIndex\",\"type\":\"uint64\"},{\"internalType\":\"uint64\",\"name\":\"lastIndex\",\"type\":\"uint64\"}],\"internalType\":\"structInputRange\",\"name\":\"inputRange\",\"type\":\"tuple\"},{\"internalType\":\"bytes32\",\"name\":\"epochHash\",\"type\":\"bytes32\"}],\"name\":\"submitClaim\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +// IConsensusABI is the input ABI used to generate the binding from. +// Deprecated: Use IConsensusMetaData.ABI instead. +var IConsensusABI = IConsensusMetaData.ABI + +// IConsensus is an auto generated Go binding around an Ethereum contract. +type IConsensus struct { + IConsensusCaller // Read-only binding to the contract + IConsensusTransactor // Write-only binding to the contract + IConsensusFilterer // Log filterer for contract events +} + +// IConsensusCaller is an auto generated read-only Go binding around an Ethereum contract. +type IConsensusCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// IConsensusTransactor is an auto generated write-only Go binding around an Ethereum contract. +type IConsensusTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// IConsensusFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type IConsensusFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// IConsensusSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. 
+type IConsensusSession struct { + Contract *IConsensus // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// IConsensusCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type IConsensusCallerSession struct { + Contract *IConsensusCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// IConsensusTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type IConsensusTransactorSession struct { + Contract *IConsensusTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// IConsensusRaw is an auto generated low-level Go binding around an Ethereum contract. +type IConsensusRaw struct { + Contract *IConsensus // Generic contract binding to access the raw methods on +} + +// IConsensusCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type IConsensusCallerRaw struct { + Contract *IConsensusCaller // Generic read-only contract binding to access the raw methods on +} + +// IConsensusTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. +type IConsensusTransactorRaw struct { + Contract *IConsensusTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewIConsensus creates a new instance of IConsensus, bound to a specific deployed contract. +func NewIConsensus(address common.Address, backend bind.ContractBackend) (*IConsensus, error) { + contract, err := bindIConsensus(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &IConsensus{IConsensusCaller: IConsensusCaller{contract: contract}, IConsensusTransactor: IConsensusTransactor{contract: contract}, IConsensusFilterer: IConsensusFilterer{contract: contract}}, nil +} + +// NewIConsensusCaller creates a new read-only instance of IConsensus, bound to a specific deployed contract. +func NewIConsensusCaller(address common.Address, caller bind.ContractCaller) (*IConsensusCaller, error) { + contract, err := bindIConsensus(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &IConsensusCaller{contract: contract}, nil +} + +// NewIConsensusTransactor creates a new write-only instance of IConsensus, bound to a specific deployed contract. +func NewIConsensusTransactor(address common.Address, transactor bind.ContractTransactor) (*IConsensusTransactor, error) { + contract, err := bindIConsensus(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &IConsensusTransactor{contract: contract}, nil +} + +// NewIConsensusFilterer creates a new log filterer instance of IConsensus, bound to a specific deployed contract. +func NewIConsensusFilterer(address common.Address, filterer bind.ContractFilterer) (*IConsensusFilterer, error) { + contract, err := bindIConsensus(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &IConsensusFilterer{contract: contract}, nil +} + +// bindIConsensus binds a generic wrapper to an already deployed contract. 
+func bindIConsensus(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := IConsensusMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_IConsensus *IConsensusRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IConsensus.Contract.IConsensusCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_IConsensus *IConsensusRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IConsensus.Contract.IConsensusTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_IConsensus *IConsensusRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IConsensus.Contract.IConsensusTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_IConsensus *IConsensusCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _IConsensus.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_IConsensus *IConsensusTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _IConsensus.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_IConsensus *IConsensusTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _IConsensus.Contract.contract.Transact(opts, method, params...) +} + +// GetEpochHash is a free data retrieval call binding the contract method 0xc1f59afc. +// +// Solidity: function getEpochHash(address appContract, (uint64,uint64) inputRange) view returns(bytes32 epochHash) +func (_IConsensus *IConsensusCaller) GetEpochHash(opts *bind.CallOpts, appContract common.Address, inputRange InputRange) ([32]byte, error) { + var out []interface{} + err := _IConsensus.contract.Call(opts, &out, "getEpochHash", appContract, inputRange) + + if err != nil { + return *new([32]byte), err + } + + out0 := *abi.ConvertType(out[0], new([32]byte)).(*[32]byte) + + return out0, err + +} + +// GetEpochHash is a free data retrieval call binding the contract method 0xc1f59afc. 
+// +// Solidity: function getEpochHash(address appContract, (uint64,uint64) inputRange) view returns(bytes32 epochHash) +func (_IConsensus *IConsensusSession) GetEpochHash(appContract common.Address, inputRange InputRange) ([32]byte, error) { + return _IConsensus.Contract.GetEpochHash(&_IConsensus.CallOpts, appContract, inputRange) +} + +// GetEpochHash is a free data retrieval call binding the contract method 0xc1f59afc. +// +// Solidity: function getEpochHash(address appContract, (uint64,uint64) inputRange) view returns(bytes32 epochHash) +func (_IConsensus *IConsensusCallerSession) GetEpochHash(appContract common.Address, inputRange InputRange) ([32]byte, error) { + return _IConsensus.Contract.GetEpochHash(&_IConsensus.CallOpts, appContract, inputRange) +} + +// SubmitClaim is a paid mutator transaction binding the contract method 0x866b85fa. +// +// Solidity: function submitClaim(address appContract, (uint64,uint64) inputRange, bytes32 epochHash) returns() +func (_IConsensus *IConsensusTransactor) SubmitClaim(opts *bind.TransactOpts, appContract common.Address, inputRange InputRange, epochHash [32]byte) (*types.Transaction, error) { + return _IConsensus.contract.Transact(opts, "submitClaim", appContract, inputRange, epochHash) +} + +// SubmitClaim is a paid mutator transaction binding the contract method 0x866b85fa. +// +// Solidity: function submitClaim(address appContract, (uint64,uint64) inputRange, bytes32 epochHash) returns() +func (_IConsensus *IConsensusSession) SubmitClaim(appContract common.Address, inputRange InputRange, epochHash [32]byte) (*types.Transaction, error) { + return _IConsensus.Contract.SubmitClaim(&_IConsensus.TransactOpts, appContract, inputRange, epochHash) +} + +// SubmitClaim is a paid mutator transaction binding the contract method 0x866b85fa. +// +// Solidity: function submitClaim(address appContract, (uint64,uint64) inputRange, bytes32 epochHash) returns() +func (_IConsensus *IConsensusTransactorSession) SubmitClaim(appContract common.Address, inputRange InputRange, epochHash [32]byte) (*types.Transaction, error) { + return _IConsensus.Contract.SubmitClaim(&_IConsensus.TransactOpts, appContract, inputRange, epochHash) +} + +// IConsensusClaimAcceptanceIterator is returned from FilterClaimAcceptance and is used to iterate over the raw logs and unpacked data for ClaimAcceptance events raised by the IConsensus contract. +type IConsensusClaimAcceptanceIterator struct { + Event *IConsensusClaimAcceptance // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *IConsensusClaimAcceptanceIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(IConsensusClaimAcceptance) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(IConsensusClaimAcceptance) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *IConsensusClaimAcceptanceIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *IConsensusClaimAcceptanceIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// IConsensusClaimAcceptance represents a ClaimAcceptance event raised by the IConsensus contract. +type IConsensusClaimAcceptance struct { + AppContract common.Address + InputRange InputRange + EpochHash [32]byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterClaimAcceptance is a free log retrieval operation binding the contract event 0x4e068a6b8ed35e6ee03244135874f91ccebb5cd1f3a258a6dc2ad0ebd2988476. +// +// Solidity: event ClaimAcceptance(address indexed appContract, (uint64,uint64) inputRange, bytes32 epochHash) +func (_IConsensus *IConsensusFilterer) FilterClaimAcceptance(opts *bind.FilterOpts, appContract []common.Address) (*IConsensusClaimAcceptanceIterator, error) { + + var appContractRule []interface{} + for _, appContractItem := range appContract { + appContractRule = append(appContractRule, appContractItem) + } + + logs, sub, err := _IConsensus.contract.FilterLogs(opts, "ClaimAcceptance", appContractRule) + if err != nil { + return nil, err + } + return &IConsensusClaimAcceptanceIterator{contract: _IConsensus.contract, event: "ClaimAcceptance", logs: logs, sub: sub}, nil +} + +// WatchClaimAcceptance is a free log subscription operation binding the contract event 0x4e068a6b8ed35e6ee03244135874f91ccebb5cd1f3a258a6dc2ad0ebd2988476. 
+// +// Solidity: event ClaimAcceptance(address indexed appContract, (uint64,uint64) inputRange, bytes32 epochHash) +func (_IConsensus *IConsensusFilterer) WatchClaimAcceptance(opts *bind.WatchOpts, sink chan<- *IConsensusClaimAcceptance, appContract []common.Address) (event.Subscription, error) { + + var appContractRule []interface{} + for _, appContractItem := range appContract { + appContractRule = append(appContractRule, appContractItem) + } + + logs, sub, err := _IConsensus.contract.WatchLogs(opts, "ClaimAcceptance", appContractRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(IConsensusClaimAcceptance) + if err := _IConsensus.contract.UnpackLog(event, "ClaimAcceptance", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseClaimAcceptance is a log parse operation binding the contract event 0x4e068a6b8ed35e6ee03244135874f91ccebb5cd1f3a258a6dc2ad0ebd2988476. +// +// Solidity: event ClaimAcceptance(address indexed appContract, (uint64,uint64) inputRange, bytes32 epochHash) +func (_IConsensus *IConsensusFilterer) ParseClaimAcceptance(log types.Log) (*IConsensusClaimAcceptance, error) { + event := new(IConsensusClaimAcceptance) + if err := _IConsensus.contract.UnpackLog(event, "ClaimAcceptance", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} + +// IConsensusClaimSubmissionIterator is returned from FilterClaimSubmission and is used to iterate over the raw logs and unpacked data for ClaimSubmission events raised by the IConsensus contract. +type IConsensusClaimSubmissionIterator struct { + Event *IConsensusClaimSubmission // Event containing the contract specifics and raw log + + contract *bind.BoundContract // Generic contract to use for unpacking event data + event string // Event name to use for unpacking event data + + logs chan types.Log // Log channel receiving the found contract events + sub ethereum.Subscription // Subscription for errors, completion and termination + done bool // Whether the subscription completed delivering logs + fail error // Occurred error to stop iteration +} + +// Next advances the iterator to the subsequent event, returning whether there +// are any more events found. In case of a retrieval or parsing error, false is +// returned and Error() can be queried for the exact failure. 
+func (it *IConsensusClaimSubmissionIterator) Next() bool { + // If the iterator failed, stop iterating + if it.fail != nil { + return false + } + // If the iterator completed, deliver directly whatever's available + if it.done { + select { + case log := <-it.logs: + it.Event = new(IConsensusClaimSubmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + default: + return false + } + } + // Iterator still in progress, wait for either a data or an error event + select { + case log := <-it.logs: + it.Event = new(IConsensusClaimSubmission) + if err := it.contract.UnpackLog(it.Event, it.event, log); err != nil { + it.fail = err + return false + } + it.Event.Raw = log + return true + + case err := <-it.sub.Err(): + it.done = true + it.fail = err + return it.Next() + } +} + +// Error returns any retrieval or parsing error occurred during filtering. +func (it *IConsensusClaimSubmissionIterator) Error() error { + return it.fail +} + +// Close terminates the iteration process, releasing any pending underlying +// resources. +func (it *IConsensusClaimSubmissionIterator) Close() error { + it.sub.Unsubscribe() + return nil +} + +// IConsensusClaimSubmission represents a ClaimSubmission event raised by the IConsensus contract. +type IConsensusClaimSubmission struct { + Submitter common.Address + AppContract common.Address + InputRange InputRange + EpochHash [32]byte + Raw types.Log // Blockchain specific contextual infos +} + +// FilterClaimSubmission is a free log retrieval operation binding the contract event 0x940326476a755934b6ae9d2b36ffcf1f447c3a8223f6d9f8a796b54fbfcce582. +// +// Solidity: event ClaimSubmission(address indexed submitter, address indexed appContract, (uint64,uint64) inputRange, bytes32 epochHash) +func (_IConsensus *IConsensusFilterer) FilterClaimSubmission(opts *bind.FilterOpts, submitter []common.Address, appContract []common.Address) (*IConsensusClaimSubmissionIterator, error) { + + var submitterRule []interface{} + for _, submitterItem := range submitter { + submitterRule = append(submitterRule, submitterItem) + } + var appContractRule []interface{} + for _, appContractItem := range appContract { + appContractRule = append(appContractRule, appContractItem) + } + + logs, sub, err := _IConsensus.contract.FilterLogs(opts, "ClaimSubmission", submitterRule, appContractRule) + if err != nil { + return nil, err + } + return &IConsensusClaimSubmissionIterator{contract: _IConsensus.contract, event: "ClaimSubmission", logs: logs, sub: sub}, nil +} + +// WatchClaimSubmission is a free log subscription operation binding the contract event 0x940326476a755934b6ae9d2b36ffcf1f447c3a8223f6d9f8a796b54fbfcce582. 
+// +// Solidity: event ClaimSubmission(address indexed submitter, address indexed appContract, (uint64,uint64) inputRange, bytes32 epochHash) +func (_IConsensus *IConsensusFilterer) WatchClaimSubmission(opts *bind.WatchOpts, sink chan<- *IConsensusClaimSubmission, submitter []common.Address, appContract []common.Address) (event.Subscription, error) { + + var submitterRule []interface{} + for _, submitterItem := range submitter { + submitterRule = append(submitterRule, submitterItem) + } + var appContractRule []interface{} + for _, appContractItem := range appContract { + appContractRule = append(appContractRule, appContractItem) + } + + logs, sub, err := _IConsensus.contract.WatchLogs(opts, "ClaimSubmission", submitterRule, appContractRule) + if err != nil { + return nil, err + } + return event.NewSubscription(func(quit <-chan struct{}) error { + defer sub.Unsubscribe() + for { + select { + case log := <-logs: + // New log arrived, parse the event and forward to the user + event := new(IConsensusClaimSubmission) + if err := _IConsensus.contract.UnpackLog(event, "ClaimSubmission", log); err != nil { + return err + } + event.Raw = log + + select { + case sink <- event: + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + case err := <-sub.Err(): + return err + case <-quit: + return nil + } + } + }), nil +} + +// ParseClaimSubmission is a log parse operation binding the contract event 0x940326476a755934b6ae9d2b36ffcf1f447c3a8223f6d9f8a796b54fbfcce582. +// +// Solidity: event ClaimSubmission(address indexed submitter, address indexed appContract, (uint64,uint64) inputRange, bytes32 epochHash) +func (_IConsensus *IConsensusFilterer) ParseClaimSubmission(log types.Log) (*IConsensusClaimSubmission, error) { + event := new(IConsensusClaimSubmission) + if err := _IConsensus.contract.UnpackLog(event, "ClaimSubmission", log); err != nil { + return nil, err + } + event.Raw = log + return event, nil +} diff --git a/pkg/contracts/input_box.go b/pkg/contracts/inputbox/inputbox.go similarity index 71% rename from pkg/contracts/input_box.go rename to pkg/contracts/inputbox/inputbox.go index 494c9ed40..a9e0620b8 100644 --- a/pkg/contracts/input_box.go +++ b/pkg/contracts/inputbox/inputbox.go @@ -1,7 +1,7 @@ // Code generated - DO NOT EDIT. // This file is a generated binding and any manual changes will be lost. -package contracts +package inputbox import ( "errors" @@ -31,7 +31,7 @@ var ( // InputBoxMetaData contains all meta data concerning the InputBox contract. 
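With the History binding deleted, claims are read and watched through the new IConsensus binding generated above. A hedged consumer sketch of querying an epoch hash and streaming ClaimSubmission events; the module path, websocket endpoint, and zero-valued addresses are illustrative assumptions, and nil opts fall back to the binding defaults:

package main

import (
	"log"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethclient"

	"github.com/cartesi/rollups-node/pkg/contracts/iconsensus" // assumed module path
)

func main() {
	// Hypothetical endpoint and placeholder addresses, for illustration only.
	client, err := ethclient.Dial("ws://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	appContract := common.HexToAddress("0x0000000000000000000000000000000000000000")
	consensusAddr := common.HexToAddress("0x0000000000000000000000000000000000000000")

	consensus, err := iconsensus.NewIConsensus(consensusAddr, client)
	if err != nil {
		log.Fatal(err)
	}

	// Read the accepted epoch hash for a closed input range; the bounds are
	// uint64 per the new InputRange struct.
	epochHash, err := consensus.GetEpochHash(nil, appContract,
		iconsensus.InputRange{FirstIndex: 0, LastIndex: 9})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("epoch hash: %x", epochHash)

	// Stream ClaimSubmission events for this application; the first indexed
	// topic (submitter) is left unfiltered here.
	sink := make(chan *iconsensus.IConsensusClaimSubmission)
	sub, err := consensus.WatchClaimSubmission(nil, sink, nil,
		[]common.Address{appContract})
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()
	for {
		select {
		case ev := <-sink:
			log.Printf("claim for inputs [%d, %d]: %x",
				ev.InputRange.FirstIndex, ev.InputRange.LastIndex, ev.EpochHash)
		case err := <-sub.Err():
			log.Fatal(err)
		}
	}
}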
var InputBoxMetaData = &bind.MetaData{ - ABI: "[{\"inputs\":[],\"name\":\"InputSizeExceedsLimit\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"dapp\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"inputIndex\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"sender\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"input\",\"type\":\"bytes\"}],\"name\":\"InputAdded\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_dapp\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"_input\",\"type\":\"bytes\"}],\"name\":\"addInput\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_dapp\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"_index\",\"type\":\"uint256\"}],\"name\":\"getInputHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_dapp\",\"type\":\"address\"}],\"name\":\"getNumberOfInputs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", + ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"inputLength\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"maxInputLength\",\"type\":\"uint256\"}],\"name\":\"InputTooLarge\",\"type\":\"error\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"indexed\":false,\"internalType\":\"bytes\",\"name\":\"input\",\"type\":\"bytes\"}],\"name\":\"InputAdded\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"addInput\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"}],\"name\":\"getInputHash\",\"outputs\":[{\"internalType\":\"bytes32\",\"name\":\"\",\"type\":\"bytes32\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"}],\"name\":\"getNumberOfInputs\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]", } // InputBoxABI is the input ABI used to generate the binding from. @@ -182,10 +182,10 @@ func (_InputBox *InputBoxTransactorRaw) Transact(opts *bind.TransactOpts, method // GetInputHash is a free data retrieval call binding the contract method 0x677087c9. 
// -// Solidity: function getInputHash(address _dapp, uint256 _index) view returns(bytes32) -func (_InputBox *InputBoxCaller) GetInputHash(opts *bind.CallOpts, _dapp common.Address, _index *big.Int) ([32]byte, error) { +// Solidity: function getInputHash(address appContract, uint256 index) view returns(bytes32) +func (_InputBox *InputBoxCaller) GetInputHash(opts *bind.CallOpts, appContract common.Address, index *big.Int) ([32]byte, error) { var out []interface{} - err := _InputBox.contract.Call(opts, &out, "getInputHash", _dapp, _index) + err := _InputBox.contract.Call(opts, &out, "getInputHash", appContract, index) if err != nil { return *new([32]byte), err @@ -199,24 +199,24 @@ func (_InputBox *InputBoxCaller) GetInputHash(opts *bind.CallOpts, _dapp common. // GetInputHash is a free data retrieval call binding the contract method 0x677087c9. // -// Solidity: function getInputHash(address _dapp, uint256 _index) view returns(bytes32) -func (_InputBox *InputBoxSession) GetInputHash(_dapp common.Address, _index *big.Int) ([32]byte, error) { - return _InputBox.Contract.GetInputHash(&_InputBox.CallOpts, _dapp, _index) +// Solidity: function getInputHash(address appContract, uint256 index) view returns(bytes32) +func (_InputBox *InputBoxSession) GetInputHash(appContract common.Address, index *big.Int) ([32]byte, error) { + return _InputBox.Contract.GetInputHash(&_InputBox.CallOpts, appContract, index) } // GetInputHash is a free data retrieval call binding the contract method 0x677087c9. // -// Solidity: function getInputHash(address _dapp, uint256 _index) view returns(bytes32) -func (_InputBox *InputBoxCallerSession) GetInputHash(_dapp common.Address, _index *big.Int) ([32]byte, error) { - return _InputBox.Contract.GetInputHash(&_InputBox.CallOpts, _dapp, _index) +// Solidity: function getInputHash(address appContract, uint256 index) view returns(bytes32) +func (_InputBox *InputBoxCallerSession) GetInputHash(appContract common.Address, index *big.Int) ([32]byte, error) { + return _InputBox.Contract.GetInputHash(&_InputBox.CallOpts, appContract, index) } // GetNumberOfInputs is a free data retrieval call binding the contract method 0x61a93c87. // -// Solidity: function getNumberOfInputs(address _dapp) view returns(uint256) -func (_InputBox *InputBoxCaller) GetNumberOfInputs(opts *bind.CallOpts, _dapp common.Address) (*big.Int, error) { +// Solidity: function getNumberOfInputs(address appContract) view returns(uint256) +func (_InputBox *InputBoxCaller) GetNumberOfInputs(opts *bind.CallOpts, appContract common.Address) (*big.Int, error) { var out []interface{} - err := _InputBox.contract.Call(opts, &out, "getNumberOfInputs", _dapp) + err := _InputBox.contract.Call(opts, &out, "getNumberOfInputs", appContract) if err != nil { return *new(*big.Int), err @@ -230,37 +230,37 @@ func (_InputBox *InputBoxCaller) GetNumberOfInputs(opts *bind.CallOpts, _dapp co // GetNumberOfInputs is a free data retrieval call binding the contract method 0x61a93c87. 
// -// Solidity: function getNumberOfInputs(address _dapp) view returns(uint256) -func (_InputBox *InputBoxSession) GetNumberOfInputs(_dapp common.Address) (*big.Int, error) { - return _InputBox.Contract.GetNumberOfInputs(&_InputBox.CallOpts, _dapp) +// Solidity: function getNumberOfInputs(address appContract) view returns(uint256) +func (_InputBox *InputBoxSession) GetNumberOfInputs(appContract common.Address) (*big.Int, error) { + return _InputBox.Contract.GetNumberOfInputs(&_InputBox.CallOpts, appContract) } // GetNumberOfInputs is a free data retrieval call binding the contract method 0x61a93c87. // -// Solidity: function getNumberOfInputs(address _dapp) view returns(uint256) -func (_InputBox *InputBoxCallerSession) GetNumberOfInputs(_dapp common.Address) (*big.Int, error) { - return _InputBox.Contract.GetNumberOfInputs(&_InputBox.CallOpts, _dapp) +// Solidity: function getNumberOfInputs(address appContract) view returns(uint256) +func (_InputBox *InputBoxCallerSession) GetNumberOfInputs(appContract common.Address) (*big.Int, error) { + return _InputBox.Contract.GetNumberOfInputs(&_InputBox.CallOpts, appContract) } // AddInput is a paid mutator transaction binding the contract method 0x1789cd63. // -// Solidity: function addInput(address _dapp, bytes _input) returns(bytes32) -func (_InputBox *InputBoxTransactor) AddInput(opts *bind.TransactOpts, _dapp common.Address, _input []byte) (*types.Transaction, error) { - return _InputBox.contract.Transact(opts, "addInput", _dapp, _input) +// Solidity: function addInput(address appContract, bytes payload) returns(bytes32) +func (_InputBox *InputBoxTransactor) AddInput(opts *bind.TransactOpts, appContract common.Address, payload []byte) (*types.Transaction, error) { + return _InputBox.contract.Transact(opts, "addInput", appContract, payload) } // AddInput is a paid mutator transaction binding the contract method 0x1789cd63. // -// Solidity: function addInput(address _dapp, bytes _input) returns(bytes32) -func (_InputBox *InputBoxSession) AddInput(_dapp common.Address, _input []byte) (*types.Transaction, error) { - return _InputBox.Contract.AddInput(&_InputBox.TransactOpts, _dapp, _input) +// Solidity: function addInput(address appContract, bytes payload) returns(bytes32) +func (_InputBox *InputBoxSession) AddInput(appContract common.Address, payload []byte) (*types.Transaction, error) { + return _InputBox.Contract.AddInput(&_InputBox.TransactOpts, appContract, payload) } // AddInput is a paid mutator transaction binding the contract method 0x1789cd63. // -// Solidity: function addInput(address _dapp, bytes _input) returns(bytes32) -func (_InputBox *InputBoxTransactorSession) AddInput(_dapp common.Address, _input []byte) (*types.Transaction, error) { - return _InputBox.Contract.AddInput(&_InputBox.TransactOpts, _dapp, _input) +// Solidity: function addInput(address appContract, bytes payload) returns(bytes32) +func (_InputBox *InputBoxTransactorSession) AddInput(appContract common.Address, payload []byte) (*types.Transaction, error) { + return _InputBox.Contract.AddInput(&_InputBox.TransactOpts, appContract, payload) } // InputBoxInputAddedIterator is returned from FilterInputAdded and is used to iterate over the raw logs and unpacked data for InputAdded events raised by the InputBox contract. @@ -332,49 +332,48 @@ func (it *InputBoxInputAddedIterator) Close() error { // InputBoxInputAdded represents a InputAdded event raised by the InputBox contract. 
type InputBoxInputAdded struct { - Dapp common.Address - InputIndex *big.Int - Sender common.Address - Input []byte - Raw types.Log // Blockchain specific contextual infos + AppContract common.Address + Index *big.Int + Input []byte + Raw types.Log // Blockchain specific contextual infos } -// FilterInputAdded is a free log retrieval operation binding the contract event 0x6aaa400068bf4ca337265e2a1e1e841f66b8597fd5b452fdc52a44bed28a0784. +// FilterInputAdded is a free log retrieval operation binding the contract event 0xc05d337121a6e8605c6ec0b72aa29c4210ffe6e5b9cefdd6a7058188a8f66f98. // -// Solidity: event InputAdded(address indexed dapp, uint256 indexed inputIndex, address sender, bytes input) -func (_InputBox *InputBoxFilterer) FilterInputAdded(opts *bind.FilterOpts, dapp []common.Address, inputIndex []*big.Int) (*InputBoxInputAddedIterator, error) { +// Solidity: event InputAdded(address indexed appContract, uint256 indexed index, bytes input) +func (_InputBox *InputBoxFilterer) FilterInputAdded(opts *bind.FilterOpts, appContract []common.Address, index []*big.Int) (*InputBoxInputAddedIterator, error) { - var dappRule []interface{} - for _, dappItem := range dapp { - dappRule = append(dappRule, dappItem) + var appContractRule []interface{} + for _, appContractItem := range appContract { + appContractRule = append(appContractRule, appContractItem) } - var inputIndexRule []interface{} - for _, inputIndexItem := range inputIndex { - inputIndexRule = append(inputIndexRule, inputIndexItem) + var indexRule []interface{} + for _, indexItem := range index { + indexRule = append(indexRule, indexItem) } - logs, sub, err := _InputBox.contract.FilterLogs(opts, "InputAdded", dappRule, inputIndexRule) + logs, sub, err := _InputBox.contract.FilterLogs(opts, "InputAdded", appContractRule, indexRule) if err != nil { return nil, err } return &InputBoxInputAddedIterator{contract: _InputBox.contract, event: "InputAdded", logs: logs, sub: sub}, nil } -// WatchInputAdded is a free log subscription operation binding the contract event 0x6aaa400068bf4ca337265e2a1e1e841f66b8597fd5b452fdc52a44bed28a0784. +// WatchInputAdded is a free log subscription operation binding the contract event 0xc05d337121a6e8605c6ec0b72aa29c4210ffe6e5b9cefdd6a7058188a8f66f98. 
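An aside on the constants changing in this hunk: an event's log topic is the keccak256 hash of its canonical signature, so removing the `sender` parameter and renaming the others is exactly what moves the topic from 0x6aaa4000... to 0xc05d3371.... A standalone sanity check with go-ethereum, illustrative only and not part of this patch, might look like:

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A log topic is keccak256 of the canonical event signature; the
	// `indexed` markers and parameter names do not participate in the hash.
	oldTopic := crypto.Keccak256Hash([]byte("InputAdded(address,uint256,address,bytes)"))
	newTopic := crypto.Keccak256Hash([]byte("InputAdded(address,uint256,bytes)"))
	// These should print the old and new constants embedded in the bindings.
	fmt.Println(oldTopic.Hex())
	fmt.Println(newTopic.Hex())
}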
// -// Solidity: event InputAdded(address indexed dapp, uint256 indexed inputIndex, address sender, bytes input) -func (_InputBox *InputBoxFilterer) WatchInputAdded(opts *bind.WatchOpts, sink chan<- *InputBoxInputAdded, dapp []common.Address, inputIndex []*big.Int) (event.Subscription, error) { +// Solidity: event InputAdded(address indexed appContract, uint256 indexed index, bytes input) +func (_InputBox *InputBoxFilterer) WatchInputAdded(opts *bind.WatchOpts, sink chan<- *InputBoxInputAdded, appContract []common.Address, index []*big.Int) (event.Subscription, error) { - var dappRule []interface{} - for _, dappItem := range dapp { - dappRule = append(dappRule, dappItem) + var appContractRule []interface{} + for _, appContractItem := range appContract { + appContractRule = append(appContractRule, appContractItem) } - var inputIndexRule []interface{} - for _, inputIndexItem := range inputIndex { - inputIndexRule = append(inputIndexRule, inputIndexItem) + var indexRule []interface{} + for _, indexItem := range index { + indexRule = append(indexRule, indexItem) } - logs, sub, err := _InputBox.contract.WatchLogs(opts, "InputAdded", dappRule, inputIndexRule) + logs, sub, err := _InputBox.contract.WatchLogs(opts, "InputAdded", appContractRule, indexRule) if err != nil { return nil, err } @@ -406,9 +405,9 @@ func (_InputBox *InputBoxFilterer) WatchInputAdded(opts *bind.WatchOpts, sink ch }), nil } -// ParseInputAdded is a log parse operation binding the contract event 0x6aaa400068bf4ca337265e2a1e1e841f66b8597fd5b452fdc52a44bed28a0784. +// ParseInputAdded is a log parse operation binding the contract event 0xc05d337121a6e8605c6ec0b72aa29c4210ffe6e5b9cefdd6a7058188a8f66f98. // -// Solidity: event InputAdded(address indexed dapp, uint256 indexed inputIndex, address sender, bytes input) +// Solidity: event InputAdded(address indexed appContract, uint256 indexed index, bytes input) func (_InputBox *InputBoxFilterer) ParseInputAdded(log types.Log) (*InputBoxInputAdded, error) { event := new(InputBoxInputAdded) if err := _InputBox.contract.UnpackLog(event, "InputAdded", log); err != nil { diff --git a/pkg/contracts/inputs/inputs.go b/pkg/contracts/inputs/inputs.go new file mode 100644 index 000000000..0e143e126 --- /dev/null +++ b/pkg/contracts/inputs/inputs.go @@ -0,0 +1,202 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package inputs + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. +var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// InputsMetaData contains all meta data concerning the Inputs contract. 
+var InputsMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"chainId\",\"type\":\"uint256\"},{\"internalType\":\"address\",\"name\":\"appContract\",\"type\":\"address\"},{\"internalType\":\"address\",\"name\":\"msgSender\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"blockNumber\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"blockTimestamp\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"index\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"EvmAdvance\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +// InputsABI is the input ABI used to generate the binding from. +// Deprecated: Use InputsMetaData.ABI instead. +var InputsABI = InputsMetaData.ABI + +// Inputs is an auto generated Go binding around an Ethereum contract. +type Inputs struct { + InputsCaller // Read-only binding to the contract + InputsTransactor // Write-only binding to the contract + InputsFilterer // Log filterer for contract events +} + +// InputsCaller is an auto generated read-only Go binding around an Ethereum contract. +type InputsCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// InputsTransactor is an auto generated write-only Go binding around an Ethereum contract. +type InputsTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// InputsFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type InputsFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// InputsSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type InputsSession struct { + Contract *Inputs // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// InputsCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type InputsCallerSession struct { + Contract *InputsCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// InputsTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type InputsTransactorSession struct { + Contract *InputsTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// InputsRaw is an auto generated low-level Go binding around an Ethereum contract. +type InputsRaw struct { + Contract *Inputs // Generic contract binding to access the raw methods on +} + +// InputsCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type InputsCallerRaw struct { + Contract *InputsCaller // Generic read-only contract binding to access the raw methods on +} + +// InputsTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
+type InputsTransactorRaw struct { + Contract *InputsTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewInputs creates a new instance of Inputs, bound to a specific deployed contract. +func NewInputs(address common.Address, backend bind.ContractBackend) (*Inputs, error) { + contract, err := bindInputs(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Inputs{InputsCaller: InputsCaller{contract: contract}, InputsTransactor: InputsTransactor{contract: contract}, InputsFilterer: InputsFilterer{contract: contract}}, nil +} + +// NewInputsCaller creates a new read-only instance of Inputs, bound to a specific deployed contract. +func NewInputsCaller(address common.Address, caller bind.ContractCaller) (*InputsCaller, error) { + contract, err := bindInputs(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &InputsCaller{contract: contract}, nil +} + +// NewInputsTransactor creates a new write-only instance of Inputs, bound to a specific deployed contract. +func NewInputsTransactor(address common.Address, transactor bind.ContractTransactor) (*InputsTransactor, error) { + contract, err := bindInputs(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &InputsTransactor{contract: contract}, nil +} + +// NewInputsFilterer creates a new log filterer instance of Inputs, bound to a specific deployed contract. +func NewInputsFilterer(address common.Address, filterer bind.ContractFilterer) (*InputsFilterer, error) { + contract, err := bindInputs(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &InputsFilterer{contract: contract}, nil +} + +// bindInputs binds a generic wrapper to an already deployed contract. +func bindInputs(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := InputsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Inputs *InputsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Inputs.Contract.InputsCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Inputs *InputsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Inputs.Contract.InputsTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Inputs *InputsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Inputs.Contract.InputsTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. 
+func (_Inputs *InputsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Inputs.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Inputs *InputsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Inputs.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Inputs *InputsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Inputs.Contract.contract.Transact(opts, method, params...) +} + +// EvmAdvance is a paid mutator transaction binding the contract method 0xcc7dee1f. +// +// Solidity: function EvmAdvance(uint256 chainId, address appContract, address msgSender, uint256 blockNumber, uint256 blockTimestamp, uint256 index, bytes payload) returns() +func (_Inputs *InputsTransactor) EvmAdvance(opts *bind.TransactOpts, chainId *big.Int, appContract common.Address, msgSender common.Address, blockNumber *big.Int, blockTimestamp *big.Int, index *big.Int, payload []byte) (*types.Transaction, error) { + return _Inputs.contract.Transact(opts, "EvmAdvance", chainId, appContract, msgSender, blockNumber, blockTimestamp, index, payload) +} + +// EvmAdvance is a paid mutator transaction binding the contract method 0xcc7dee1f. +// +// Solidity: function EvmAdvance(uint256 chainId, address appContract, address msgSender, uint256 blockNumber, uint256 blockTimestamp, uint256 index, bytes payload) returns() +func (_Inputs *InputsSession) EvmAdvance(chainId *big.Int, appContract common.Address, msgSender common.Address, blockNumber *big.Int, blockTimestamp *big.Int, index *big.Int, payload []byte) (*types.Transaction, error) { + return _Inputs.Contract.EvmAdvance(&_Inputs.TransactOpts, chainId, appContract, msgSender, blockNumber, blockTimestamp, index, payload) +} + +// EvmAdvance is a paid mutator transaction binding the contract method 0xcc7dee1f. +// +// Solidity: function EvmAdvance(uint256 chainId, address appContract, address msgSender, uint256 blockNumber, uint256 blockTimestamp, uint256 index, bytes payload) returns() +func (_Inputs *InputsTransactorSession) EvmAdvance(chainId *big.Int, appContract common.Address, msgSender common.Address, blockNumber *big.Int, blockTimestamp *big.Int, index *big.Int, payload []byte) (*types.Transaction, error) { + return _Inputs.Contract.EvmAdvance(&_Inputs.TransactOpts, chainId, appContract, msgSender, blockNumber, blockTimestamp, index, payload) +} diff --git a/pkg/contracts/outputs/outputs.go b/pkg/contracts/outputs/outputs.go new file mode 100644 index 000000000..00dc229f2 --- /dev/null +++ b/pkg/contracts/outputs/outputs.go @@ -0,0 +1,223 @@ +// Code generated - DO NOT EDIT. +// This file is a generated binding and any manual changes will be lost. + +package outputs + +import ( + "errors" + "math/big" + "strings" + + ethereum "github.com/ethereum/go-ethereum" + "github.com/ethereum/go-ethereum/accounts/abi" + "github.com/ethereum/go-ethereum/accounts/abi/bind" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/event" +) + +// Reference imports to suppress errors if they are not otherwise used. 
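The `EvmAdvance` binding that closes the Inputs contract above is also the key to reading inputs back: after this change, the blob stored by the InputBox is an ABI-encoded `EvmAdvance` call. As a minimal sketch of the decoding pattern, mirroring what the updated ethutil test does later in this series (the helper name is ours, not part of the patch):

package example

import (
	"fmt"

	"github.com/cartesi/rollups-node/pkg/contracts/inputs"
)

// decodeEvmAdvance unpacks the arguments of an ABI-encoded EvmAdvance call,
// such as the `input` field of an InputAdded event.
func decodeEvmAdvance(input []byte) (map[string]interface{}, error) {
	inputsABI, err := inputs.InputsMetaData.GetAbi()
	if err != nil {
		return nil, err
	}
	if len(input) < 4 {
		return nil, fmt.Errorf("input too short to hold a selector")
	}
	// The first 4 bytes are the EvmAdvance selector (0xcc7dee1f); the
	// remainder is the packed argument tuple.
	args := map[string]interface{}{}
	method := inputsABI.Methods["EvmAdvance"]
	if err := method.Inputs.UnpackIntoMap(args, input[4:]); err != nil {
		return nil, err
	}
	// args now holds "chainId", "appContract", "msgSender", "blockNumber",
	// "blockTimestamp", "index" and "payload", keyed by ABI argument name.
	return args, nil
}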
+var ( + _ = errors.New + _ = big.NewInt + _ = strings.NewReader + _ = ethereum.NotFound + _ = bind.Bind + _ = common.Big1 + _ = types.BloomLookup + _ = event.NewSubscription + _ = abi.ConvertType +) + +// OutputsMetaData contains all meta data concerning the Outputs contract. +var OutputsMetaData = &bind.MetaData{ + ABI: "[{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"Notice\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"destination\",\"type\":\"address\"},{\"internalType\":\"uint256\",\"name\":\"value\",\"type\":\"uint256\"},{\"internalType\":\"bytes\",\"name\":\"payload\",\"type\":\"bytes\"}],\"name\":\"Voucher\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]", +} + +// OutputsABI is the input ABI used to generate the binding from. +// Deprecated: Use OutputsMetaData.ABI instead. +var OutputsABI = OutputsMetaData.ABI + +// Outputs is an auto generated Go binding around an Ethereum contract. +type Outputs struct { + OutputsCaller // Read-only binding to the contract + OutputsTransactor // Write-only binding to the contract + OutputsFilterer // Log filterer for contract events +} + +// OutputsCaller is an auto generated read-only Go binding around an Ethereum contract. +type OutputsCaller struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// OutputsTransactor is an auto generated write-only Go binding around an Ethereum contract. +type OutputsTransactor struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// OutputsFilterer is an auto generated log filtering Go binding around an Ethereum contract events. +type OutputsFilterer struct { + contract *bind.BoundContract // Generic contract wrapper for the low level calls +} + +// OutputsSession is an auto generated Go binding around an Ethereum contract, +// with pre-set call and transact options. +type OutputsSession struct { + Contract *Outputs // Generic contract binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// OutputsCallerSession is an auto generated read-only Go binding around an Ethereum contract, +// with pre-set call options. +type OutputsCallerSession struct { + Contract *OutputsCaller // Generic contract caller binding to set the session for + CallOpts bind.CallOpts // Call options to use throughout this session +} + +// OutputsTransactorSession is an auto generated write-only Go binding around an Ethereum contract, +// with pre-set transact options. +type OutputsTransactorSession struct { + Contract *OutputsTransactor // Generic contract transactor binding to set the session for + TransactOpts bind.TransactOpts // Transaction auth options to use throughout this session +} + +// OutputsRaw is an auto generated low-level Go binding around an Ethereum contract. +type OutputsRaw struct { + Contract *Outputs // Generic contract binding to access the raw methods on +} + +// OutputsCallerRaw is an auto generated low-level read-only Go binding around an Ethereum contract. +type OutputsCallerRaw struct { + Contract *OutputsCaller // Generic read-only contract binding to access the raw methods on +} + +// OutputsTransactorRaw is an auto generated low-level write-only Go binding around an Ethereum contract. 
+type OutputsTransactorRaw struct { + Contract *OutputsTransactor // Generic write-only contract binding to access the raw methods on +} + +// NewOutputs creates a new instance of Outputs, bound to a specific deployed contract. +func NewOutputs(address common.Address, backend bind.ContractBackend) (*Outputs, error) { + contract, err := bindOutputs(address, backend, backend, backend) + if err != nil { + return nil, err + } + return &Outputs{OutputsCaller: OutputsCaller{contract: contract}, OutputsTransactor: OutputsTransactor{contract: contract}, OutputsFilterer: OutputsFilterer{contract: contract}}, nil +} + +// NewOutputsCaller creates a new read-only instance of Outputs, bound to a specific deployed contract. +func NewOutputsCaller(address common.Address, caller bind.ContractCaller) (*OutputsCaller, error) { + contract, err := bindOutputs(address, caller, nil, nil) + if err != nil { + return nil, err + } + return &OutputsCaller{contract: contract}, nil +} + +// NewOutputsTransactor creates a new write-only instance of Outputs, bound to a specific deployed contract. +func NewOutputsTransactor(address common.Address, transactor bind.ContractTransactor) (*OutputsTransactor, error) { + contract, err := bindOutputs(address, nil, transactor, nil) + if err != nil { + return nil, err + } + return &OutputsTransactor{contract: contract}, nil +} + +// NewOutputsFilterer creates a new log filterer instance of Outputs, bound to a specific deployed contract. +func NewOutputsFilterer(address common.Address, filterer bind.ContractFilterer) (*OutputsFilterer, error) { + contract, err := bindOutputs(address, nil, nil, filterer) + if err != nil { + return nil, err + } + return &OutputsFilterer{contract: contract}, nil +} + +// bindOutputs binds a generic wrapper to an already deployed contract. +func bindOutputs(address common.Address, caller bind.ContractCaller, transactor bind.ContractTransactor, filterer bind.ContractFilterer) (*bind.BoundContract, error) { + parsed, err := OutputsMetaData.GetAbi() + if err != nil { + return nil, err + } + return bind.NewBoundContract(address, *parsed, caller, transactor, filterer), nil +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. +func (_Outputs *OutputsRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Outputs.Contract.OutputsCaller.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Outputs *OutputsRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Outputs.Contract.OutputsTransactor.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Outputs *OutputsRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Outputs.Contract.OutputsTransactor.contract.Transact(opts, method, params...) +} + +// Call invokes the (constant) contract method with params as input values and +// sets the output to result. The result type might be a single field for simple +// returns, a slice of interfaces for anonymous returns and a struct for named +// returns. 
+func (_Outputs *OutputsCallerRaw) Call(opts *bind.CallOpts, result *[]interface{}, method string, params ...interface{}) error { + return _Outputs.Contract.contract.Call(opts, result, method, params...) +} + +// Transfer initiates a plain transaction to move funds to the contract, calling +// its default method if one is available. +func (_Outputs *OutputsTransactorRaw) Transfer(opts *bind.TransactOpts) (*types.Transaction, error) { + return _Outputs.Contract.contract.Transfer(opts) +} + +// Transact invokes the (paid) contract method with params as input values. +func (_Outputs *OutputsTransactorRaw) Transact(opts *bind.TransactOpts, method string, params ...interface{}) (*types.Transaction, error) { + return _Outputs.Contract.contract.Transact(opts, method, params...) +} + +// Notice is a paid mutator transaction binding the contract method 0xc258d6e5. +// +// Solidity: function Notice(bytes payload) returns() +func (_Outputs *OutputsTransactor) Notice(opts *bind.TransactOpts, payload []byte) (*types.Transaction, error) { + return _Outputs.contract.Transact(opts, "Notice", payload) +} + +// Notice is a paid mutator transaction binding the contract method 0xc258d6e5. +// +// Solidity: function Notice(bytes payload) returns() +func (_Outputs *OutputsSession) Notice(payload []byte) (*types.Transaction, error) { + return _Outputs.Contract.Notice(&_Outputs.TransactOpts, payload) +} + +// Notice is a paid mutator transaction binding the contract method 0xc258d6e5. +// +// Solidity: function Notice(bytes payload) returns() +func (_Outputs *OutputsTransactorSession) Notice(payload []byte) (*types.Transaction, error) { + return _Outputs.Contract.Notice(&_Outputs.TransactOpts, payload) +} + +// Voucher is a paid mutator transaction binding the contract method 0x237a816f. +// +// Solidity: function Voucher(address destination, uint256 value, bytes payload) returns() +func (_Outputs *OutputsTransactor) Voucher(opts *bind.TransactOpts, destination common.Address, value *big.Int, payload []byte) (*types.Transaction, error) { + return _Outputs.contract.Transact(opts, "Voucher", destination, value, payload) +} + +// Voucher is a paid mutator transaction binding the contract method 0x237a816f. +// +// Solidity: function Voucher(address destination, uint256 value, bytes payload) returns() +func (_Outputs *OutputsSession) Voucher(destination common.Address, value *big.Int, payload []byte) (*types.Transaction, error) { + return _Outputs.Contract.Voucher(&_Outputs.TransactOpts, destination, value, payload) +} + +// Voucher is a paid mutator transaction binding the contract method 0x237a816f. 
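Unlike InputBox, the Outputs interface appears to serve mainly as an encoding schema rather than a deployed contract. Assuming, as the ethutil changes later in this patch suggest, that an executable voucher is exactly an ABI-encoded `Voucher` call, encoding one could be sketched as follows (the helper name is ours, not part of the patch):

package example

import (
	"math/big"

	"github.com/cartesi/rollups-node/pkg/contracts/outputs"
	"github.com/ethereum/go-ethereum/common"
)

// encodeVoucher ABI-encodes a voucher (4-byte Voucher selector followed by
// the packed arguments), producing the kind of `output` blob that the
// renamed ExecuteOutput/ValidateOutput helpers below take as argument.
func encodeVoucher(destination common.Address, value *big.Int, payload []byte) ([]byte, error) {
	outputsABI, err := outputs.OutputsMetaData.GetAbi()
	if err != nil {
		return nil, err
	}
	return outputsABI.Pack("Voucher", destination, value, payload)
}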
+// +// Solidity: function Voucher(address destination, uint256 value, bytes payload) returns() +func (_Outputs *OutputsTransactorSession) Voucher(destination common.Address, value *big.Int, payload []byte) (*types.Transaction, error) { + return _Outputs.Contract.Voucher(&_Outputs.TransactOpts, destination, value, payload) +} diff --git a/pkg/ethutil/ethutil.go b/pkg/ethutil/ethutil.go index 89e884c1e..1ec16f1a1 100644 --- a/pkg/ethutil/ethutil.go +++ b/pkg/ethutil/ethutil.go @@ -11,7 +11,8 @@ import ( "math/big" "github.com/cartesi/rollups-node/pkg/addresses" - "github.com/cartesi/rollups-node/pkg/contracts" + "github.com/cartesi/rollups-node/pkg/contracts/application" + "github.com/cartesi/rollups-node/pkg/contracts/inputbox" "github.com/ethereum/go-ethereum/accounts/abi/bind" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/common/hexutil" @@ -45,14 +46,14 @@ func AddInput( signer Signer, input []byte, ) (int, error) { - inputBox, err := contracts.NewInputBox(book.InputBox, client) + inputBox, err := inputbox.NewInputBox(book.InputBox, client) if err != nil { return 0, fmt.Errorf("failed to connect to InputBox contract: %v", err) } receipt, err := sendTransaction( ctx, client, signer, big.NewInt(0), GasLimit, func(txOpts *bind.TransactOpts) (*types.Transaction, error) { - return inputBox.AddInput(txOpts, book.CartesiDApp, input) + return inputBox.AddInput(txOpts, book.Application, input) }, ) if err != nil { @@ -94,7 +95,7 @@ func getInputIndex( ctx context.Context, client *ethclient.Client, book *addresses.Book, - inputBox *contracts.InputBox, + inputBox *inputbox.InputBox, receipt *types.Receipt, ) (int, error) { for _, log := range receipt.Logs { @@ -106,7 +107,7 @@ func getInputIndex( return 0, fmt.Errorf("failed to parse input added event: %v", err) } // We assume that int will fit all dapp inputs - inputIndex := int(inputAdded.InputIndex.Int64()) + inputIndex := int(inputAdded.Index.Int64()) return inputIndex, nil } return 0, fmt.Errorf("input index not found") @@ -118,14 +119,14 @@ func GetInputFromInputBox( client *ethclient.Client, book *addresses.Book, inputIndex int, -) (*contracts.InputBoxInputAdded, error) { - inputBox, err := contracts.NewInputBox(book.InputBox, client) +) (*inputbox.InputBoxInputAdded, error) { + inputBox, err := inputbox.NewInputBox(book.InputBox, client) if err != nil { return nil, fmt.Errorf("failed to connect to InputBox contract: %v", err) } it, err := inputBox.FilterInputAdded( nil, - []common.Address{book.CartesiDApp}, + []common.Address{book.Application}, []*big.Int{big.NewInt(int64(inputIndex))}, ) if err != nil { @@ -140,47 +141,38 @@ func GetInputFromInputBox( // ValidateNotice validates the given notice for the specified Dapp. // It returns nil if the notice is valid and an execution-reverted error otherwise. 
-func ValidateNotice( +func ValidateOutput( ctx context.Context, client *ethclient.Client, book *addresses.Book, - notice []byte, - proof *contracts.Proof, + output []byte, + proof *application.OutputValidityProof, ) error { - - dapp, err := contracts.NewCartesiDApp(book.CartesiDApp, client) + app, err := application.NewApplication(book.Application, client) if err != nil { return fmt.Errorf("failed to connect to CartesiDapp contract: %v", err) } - - response, err := dapp.ValidateNotice(&bind.CallOpts{Context: ctx}, notice, *proof) - _ = response - if err != nil { - return err - } - - return nil + return app.ValidateOutput(&bind.CallOpts{Context: ctx}, output, *proof) } // Executes a voucher given its payload, destination and proof. // This function waits until the transaction is added to a block and returns the transaction hash. -func ExecuteVoucher( +func ExecuteOutput( ctx context.Context, client *ethclient.Client, book *addresses.Book, signer Signer, - voucher []byte, - destination *common.Address, - proof *contracts.Proof, + output []byte, + proof *application.OutputValidityProof, ) (*common.Hash, error) { - dapp, err := contracts.NewCartesiDApp(book.CartesiDApp, client) + app, err := application.NewApplication(book.Application, client) if err != nil { return nil, fmt.Errorf("failed to connect to CartesiDapp contract: %v", err) } receipt, err := sendTransaction( ctx, client, signer, big.NewInt(0), GasLimit, func(txOpts *bind.TransactOpts) (*types.Transaction, error) { - return dapp.ExecuteVoucher(txOpts, *destination, voucher, *proof) + return app.ExecuteOutput(txOpts, output, *proof) }, ) if err != nil { diff --git a/pkg/ethutil/ethutil_test.go b/pkg/ethutil/ethutil_test.go index 34a3d154d..f06a62b3f 100644 --- a/pkg/ethutil/ethutil_test.go +++ b/pkg/ethutil/ethutil_test.go @@ -11,6 +11,7 @@ import ( "github.com/cartesi/rollups-node/internal/deps" "github.com/cartesi/rollups-node/pkg/addresses" + "github.com/cartesi/rollups-node/pkg/contracts/inputs" "github.com/cartesi/rollups-node/pkg/testutil" "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/ethclient" @@ -57,7 +58,7 @@ func (s *EthUtilSuite) TearDownTest() { } func (s *EthUtilSuite) TestAddInput() { - sender := common.HexToAddress("f39fd6e51aad88f6f4ce6ab8827279cfffb92266") + sender := common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266") payload := common.Hex2Bytes("deadbeef") inputIndex, err := AddInput(s.ctx, s.client, s.book, s.signer, payload) @@ -65,13 +66,21 @@ func (s *EthUtilSuite) TestAddInput() { s.logDevnetOutput() s.T().FailNow() } - s.Require().Equal(0, inputIndex) event, err := GetInputFromInputBox(s.client, s.book, inputIndex) s.Require().Nil(err) - s.Require().Equal(sender, event.Sender) - s.Require().Equal(payload, event.Input) + + inputsABI, err := inputs.InputsMetaData.GetAbi() + s.Require().Nil(err) + advanceInputABI := inputsABI.Methods["EvmAdvance"] + inputArgs := map[string]interface{}{} + err = advanceInputABI.Inputs.UnpackIntoMap(inputArgs, event.Input[4:]) + s.Require().Nil(err) + + s.T().Log(inputArgs) + s.Require().Equal(sender, inputArgs["msgSender"]) + s.Require().Equal(payload, inputArgs["payload"]) } // Log the output of the given container diff --git a/pkg/readerclient/proof.go b/pkg/readerclient/proof.go index 57e0547fd..1c5f9de40 100644 --- a/pkg/readerclient/proof.go +++ b/pkg/readerclient/proof.go @@ -6,7 +6,7 @@ package readerclient import ( "fmt" - "github.com/cartesi/rollups-node/pkg/contracts" + 
"github.com/cartesi/rollups-node/pkg/contracts/application" "github.com/ethereum/go-ethereum/common/hexutil" ) @@ -126,35 +126,8 @@ func newProof( return &proof, err } -func ConvertToContractProof(proof *Proof) *contracts.Proof { - var ( - outputHashOutputSiblings [][32]byte - outputHashEpochSiblings [][32]byte - ) - - for _, hash := range proof.OutputHashInOutputHashesSiblings { - outputHashOutputSiblings = append(outputHashOutputSiblings, [32]byte(hash)) - } - - for _, hash := range proof.OutputHashesInEpochSiblings { - outputHashEpochSiblings = append(outputHashEpochSiblings, [32]byte(hash)) +func ConvertToContractProof(proof *Proof) *application.OutputValidityProof { + return &application.OutputValidityProof{ + // implement this once we have the new GraphQL schema } - - outputValidityProof := contracts.OutputValidityProof{ - InputIndexWithinEpoch: uint64(proof.InputIndexWithinEpoch), - OutputIndexWithinInput: uint64(proof.OutputIndexWithinInput), - OutputHashesRootHash: [32]byte(proof.OutputHashesRootHash), - VouchersEpochRootHash: [32]byte(proof.VouchersEpochRootHash), - NoticesEpochRootHash: [32]byte(proof.NoticesEpochRootHash), - MachineStateHash: [32]byte(proof.MachineStateHash), - OutputHashInOutputHashesSiblings: outputHashOutputSiblings, - OutputHashesInEpochSiblings: outputHashEpochSiblings, - } - - contractProof := contracts.Proof{ - Validity: outputValidityProof, - Context: proof.Context, - } - - return &contractProof } diff --git a/rollups-contracts b/rollups-contracts index 77ff2c089..4b03ced84 160000 --- a/rollups-contracts +++ b/rollups-contracts @@ -1 +1 @@ -Subproject commit 77ff2c08928b871348b6e2bf3b6f5d7f3c95c9dc +Subproject commit 4b03ced840668ca6fc98780c8104708fd775b188 diff --git a/setup_env.sh b/setup_env.sh index c08f926c6..45c932e45 100644 --- a/setup_env.sh +++ b/setup_env.sh @@ -9,11 +9,10 @@ export CARTESI_BLOCKCHAIN_WS_ENDPOINT="ws://localhost:8545" export CARTESI_BLOCKCHAIN_IS_LEGACY="false" export CARTESI_BLOCKCHAIN_FINALITY_OFFSET="1" export CARTESI_BLOCKCHAIN_BLOCK_TIMEOUT="60" -export CARTESI_CONTRACTS_APPLICATION_ADDRESS="0x7C54E3f7A8070a54223469965A871fB8f6f88c22" -export CARTESI_CONTRACTS_HISTORY_ADDRESS="0x325272217ae6815b494bF38cED004c5Eb8a7CdA7" -export CARTESI_CONTRACTS_AUTHORITY_ADDRESS="0x58c93F83fb3304730C95aad2E360cdb88b782010" -export CARTESI_CONTRACTS_INPUT_BOX_ADDRESS="0x59b22D57D4f067708AB0c00552767405926dc768" -export CARTESI_CONTRACTS_INPUT_BOX_DEPLOYMENT_BLOCK_NUMBER="20" +export CARTESI_CONTRACTS_APPLICATION_ADDRESS="0xb72c832dDeA10326143831F1E5F1646920C9c990" +export CARTESI_CONTRACTS_ICONSENSUS_ADDRESS="0x77e5a5fb18F72b5106621f66C704c006c6dB4578" +export CARTESI_CONTRACTS_INPUT_BOX_ADDRESS="0xA1b8EB1F13d8D5Db976a653BbDF8972cfD14691C" +export CARTESI_CONTRACTS_INPUT_BOX_DEPLOYMENT_BLOCK_NUMBER="16" export CARTESI_SNAPSHOT_DIR="$PWD/machine-snapshot" export CARTESI_AUTH_KIND="mnemonic" export CARTESI_AUTH_MNEMONIC="test test test test test test test test test test test junk" diff --git a/test/config.go b/test/config.go index 9c5f742ab..41a977810 100644 --- a/test/config.go +++ b/test/config.go @@ -14,8 +14,7 @@ import ( const ( LocalBlockchainID = 31337 - LocalApplicationDeploymentBlockNumber = 20 - LocalInputBoxDeploymentBlockNumber = 20 + LocalInputBoxDeploymentBlockNumber = 16 LocalHttpAddress = "0.0.0.0" LocalHttpPort = 10000 LocalBlockTimeout = 120 @@ -52,9 +51,8 @@ func NewLocalNodeConfig(localPostgresEnpoint string, localBlockchainHttpEndpoint nodeConfig.BlockchainBlockTimeout = LocalBlockTimeout //Contracts - 
nodeConfig.ContractsHistoryAddress = book.HistoryAddress.Hex()
-	nodeConfig.ContractsAuthorityAddress = book.AuthorityAddress.Hex()
-	nodeConfig.ContractsApplicationAddress = book.CartesiDApp.Hex()
+	nodeConfig.ContractsApplicationAddress = book.Application.Hex()
+	nodeConfig.ContractsIConsensusAddress = book.Authority.Hex()
 	nodeConfig.ContractsInputBoxAddress = book.InputBox.Hex()
 	nodeConfig.ContractsInputBoxDeploymentBlockNumber = LocalInputBoxDeploymentBlockNumber

From 0be530f01d02bb824ddefad335c2a0324d59eeb1 Mon Sep 17 00:00:00 2001
From: Gabriel de Quadros Ligneul
Date: Thu, 9 May 2024 15:30:23 -0300
Subject: [PATCH 05/34] fix(claimer): parse iconsensus address

---
 cmd/authority-claimer/src/config.rs   | 11 +++---
 .../src/rollups_events/common.rs      | 37 +++++++++++++------
 .../src/rollups_events/mod.rs         |  2 +-
 3 files changed, 33 insertions(+), 17 deletions(-)

diff --git a/cmd/authority-claimer/src/config.rs b/cmd/authority-claimer/src/config.rs
index 3b0586c57..0edd348b4 100644
--- a/cmd/authority-claimer/src/config.rs
+++ b/cmd/authority-claimer/src/config.rs
@@ -4,7 +4,7 @@
 use crate::{
     log::{LogConfig, LogEnvCliConfig},
     redacted::Redacted,
-    rollups_events::{Address, BrokerCLIConfig, BrokerConfig},
+    rollups_events::{Address, BrokerCLIConfig, BrokerConfig, HexArrayError},
 };
 use clap::{command, Parser};
 use eth_tx_manager::{
@@ -25,7 +25,7 @@ pub enum AuthorityClaimerConfigError {
     TxManager { source: TxManagerConfigError },

     #[snafu(display("parse IConsensus address error"))]
-    ParseIConsensusAddress { source: serde_json::Error },
+    ParseIConsensusAddress { source: HexArrayError },

     #[snafu(display("Missing auth configuration"))]
     AuthConfigMissing,
@@ -87,9 +87,10 @@ impl Config {

         let log_config = LogConfig::initialize(cli_config.log_config);

-        let iconsensus_address =
-            serde_json::from_str(&cli_config.iconsensus_address)
-                .context(ParseIConsensusAddressSnafu)?;
+        let iconsensus_address = cli_config
+            .iconsensus_address
+            .try_into()
+            .context(ParseIConsensusAddressSnafu)?;

         Ok(Config {
             tx_manager_config,
diff --git a/cmd/authority-claimer/src/rollups_events/common.rs b/cmd/authority-claimer/src/rollups_events/common.rs
index 33c9e8fa4..47b2cda9e 100644
--- a/cmd/authority-claimer/src/rollups_events/common.rs
+++ b/cmd/authority-claimer/src/rollups_events/common.rs
@@ -4,6 +4,7 @@
 use base64::{engine::general_purpose::STANDARD as base64_engine, Engine as _};
 use prometheus_client::encoding::{EncodeLabelValue, LabelValueEncoder};
 use serde::{Deserialize, Deserializer, Serialize, Serializer};
+use snafu::{ResultExt, Snafu};
 use std::fmt::Write;

 pub const ADDRESS_SIZE: usize = 20;
@@ -39,6 +40,29 @@ impl<const N: usize> From<[u8; N]> for HexArray<N> {
     }
 }

+#[derive(Debug, Snafu)]
+pub enum HexArrayError {
+    #[snafu(display("hex decode error"))]
+    HexDecode { source: hex::FromHexError },
+
+    #[snafu(display("incorrect array size"))]
+    ArraySize,
+}
+
+impl<const N: usize> TryFrom<String> for HexArray<N> {
+    type Error = HexArrayError;
+
+    fn try_from(mut string_data: String) -> Result<Self, Self::Error> {
+        // The hex crate doesn't decode '0x' at the start, so we treat the value before decoding
+        if string_data.starts_with("0x") {
+            string_data.drain(..2);
+        }
+        let vec_data = hex::decode(string_data).context(HexDecodeSnafu)?;
+        let data = vec_data.try_into().or(Err(HexArrayError::ArraySize))?;
+        Ok(Self::new(data))
+    }
+}
+
 impl<const N: usize> Serialize for HexArray<N> {
     fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
     where
         S: Serializer,
@@ -53,18 +77,9 @@ impl<'de, const N: usize> Deserialize<'de> for HexArray<N> {
     where
         D: Deserializer<'de>,
     {
-        let mut string_data = String::deserialize(deserializer)?;
-        // The hex crate doesn't decode '0x' at the start, so we treat the value before decoding
-        if string_data[..2].eq("0x") {
-            string_data.drain(..2);
-        }
-        let vec_data = hex::decode(string_data).map_err(|e| {
+        String::deserialize(deserializer)?.try_into().map_err(|e| {
             serde::de::Error::custom(format!("fail to decode hex ({})", e))
-        })?;
-        let data = vec_data
-            .try_into()
-            .or(Err(serde::de::Error::custom("incorrect array size")))?;
-        Ok(Self::new(data))
+        })
     }
 }
diff --git a/cmd/authority-claimer/src/rollups_events/mod.rs b/cmd/authority-claimer/src/rollups_events/mod.rs
index 17a8f04af..bc4506b02 100644
--- a/cmd/authority-claimer/src/rollups_events/mod.rs
+++ b/cmd/authority-claimer/src/rollups_events/mod.rs
@@ -10,6 +10,6 @@ pub use broker::{
     Broker, BrokerCLIConfig, BrokerConfig, BrokerError, BrokerStream, INITIAL_ID,
 };
-pub use common::{Address, Hash};
+pub use common::{Address, Hash, HexArrayError};
 pub use rollups_claims::{RollupsClaim, RollupsClaimsStream};
 pub use rollups_stream::DAppMetadata;

From 819d8540e19d3a47b0a8b932a07dacbea73a779b Mon Sep 17 00:00:00 2001
From: Marcel Moura
Date: Fri, 12 Apr 2024 16:33:29 -0300
Subject: [PATCH 06/34] fix(test): remove config for host mode

---
 test/config.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/test/config.go b/test/config.go
index 41a977810..8f296f87f 100644
--- a/test/config.go
+++ b/test/config.go
@@ -61,7 +61,6 @@ func NewLocalNodeConfig(localPostgresEnpoint string, localBlockchainHttpEndpoint
 	nodeConfig.HttpPort = LocalHttpPort

 	//Features
-	nodeConfig.FeatureHostMode = false
 	nodeConfig.FeatureDisableClaimer = false
 	nodeConfig.FeatureDisableMachineHashCheck = false

From c8239bfdfdee7f31b282e0c6dd9c802b0732e9f5 Mon Sep 17 00:00:00 2001
From: Marcel Moura
Date: Fri, 12 Apr 2024 16:50:47 -0300
Subject: [PATCH 07/34] chore(test): run end-to-end tests via make

---
 Makefile | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/Makefile b/Makefile
index dec80d07e..247ae1173 100644
--- a/Makefile
+++ b/Makefile
@@ -6,10 +6,18 @@ submodules: ## Download the git submodules
 	@git submodule update --init --recursive

 .PHONY: test
-test: ## Execute the node tests
-	@echo "Running the tests"
+test: unit-test e2e-test ## Execute all tests
+
+.PHONY: unit-test
+unit-test: ## Execute unit tests
+	@echo "Running unit tests"
 	@go test ./...
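The e2e-test target added just below compiles the test package with --tags=endtoendtests, so end-to-end tests sit behind a Go build constraint and are invisible to the plain unit-test run. A hypothetical opt-in test file (illustrative; the tag name is the only part taken from the Makefile) would start like this:

//go:build endtoendtests

package test

import "testing"

// This file is compiled only when `go test` receives --tags=endtoendtests,
// as done by the e2e-test target; `make unit-test` never builds it.
func TestEndToEndExample(t *testing.T) {
	t.Log("end-to-end test running")
}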
+.PHONY: e2e-test +e2e-test: ## Execute e2e tests + @echo "Running end-to-end tests" + @go test -count=1 ./test --tags=endtoendtests + .PHONY: lint lint: ## Run the linter @echo "Running the linter" From ab0dc627a7ccff55887001b48e2e5b9bdc7bb3c3 Mon Sep 17 00:00:00 2001 From: Francisco Moura Date: Thu, 2 May 2024 22:27:50 -0300 Subject: [PATCH 08/34] chore: Bump Rust to 1.78.0 Added general '-A clippy::mixed_attributes_style' to CI clippy execution to avoid linter warning on the automatically generated file 'iconsensus.rs' --- .github/workflows/build.yml | 2 +- cmd/authority-claimer/rust-toolchain.toml | 2 ++ cmd/authority-claimer/src/sender.rs | 5 +---- cmd/authority-claimer/src/signer/mod.rs | 2 +- 4 files changed, 5 insertions(+), 6 deletions(-) create mode 100644 cmd/authority-claimer/rust-toolchain.toml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6e149a29e..6efeb0ccf 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -112,7 +112,7 @@ jobs: run: cargo fmt --all -- --check - name: Run linter - run: cargo clippy -- -A clippy::module_inception + run: cargo clippy -- -A clippy::module_inception -A clippy::mixed_attributes_style - name: Build binaries and tests run: cargo build --all-targets diff --git a/cmd/authority-claimer/rust-toolchain.toml b/cmd/authority-claimer/rust-toolchain.toml new file mode 100644 index 000000000..51985806f --- /dev/null +++ b/cmd/authority-claimer/rust-toolchain.toml @@ -0,0 +1,2 @@ +[toolchain] +channel = "1.78.0" diff --git a/cmd/authority-claimer/src/sender.rs b/cmd/authority-claimer/src/sender.rs index 0edc16f42..a5f4ed937 100644 --- a/cmd/authority-claimer/src/sender.rs +++ b/cmd/authority-claimer/src/sender.rs @@ -5,7 +5,7 @@ use crate::{ contracts::iconsensus::{IConsensus, InputRange}, metrics::AuthorityClaimerMetrics, rollups_events::{Address, DAppMetadata, RollupsClaim}, - signer::{ConditionalSigner, ConditionalSignerError}, + signer::ConditionalSigner, }; use async_trait::async_trait; use eth_tx_manager::{ @@ -89,9 +89,6 @@ pub enum TransactionSenderError { #[snafu(display("Invalid provider URL"))] ProviderUrl { source: ParseError }, - #[snafu(display("Failed to initialize the transaction signer"))] - Signer { source: ConditionalSignerError }, - #[snafu(display("Transaction manager error"))] TransactionManager { source: TrasactionManagerError }, diff --git a/cmd/authority-claimer/src/signer/mod.rs b/cmd/authority-claimer/src/signer/mod.rs index 37d4c9885..57036b42a 100644 --- a/cmd/authority-claimer/src/signer/mod.rs +++ b/cmd/authority-claimer/src/signer/mod.rs @@ -5,4 +5,4 @@ mod aws_credentials; mod aws_signer; mod signer; -pub use signer::{ConditionalSigner, ConditionalSignerError}; +pub use signer::ConditionalSigner; From a9d9a879694a74e0396a20a1b147b856f232a7b4 Mon Sep 17 00:00:00 2001 From: Francisco Moura Date: Thu, 9 May 2024 15:48:35 -0300 Subject: [PATCH 09/34] feat: Add CI Custom Image --- .github/workflows/build.yml | 52 +++++++++++++++++++++++++++ .github/workflows/clean-up-images.yml | 1 + build/Dockerfile | 17 +++++++++ build/docker-bake.hcl | 7 ++++ build/docker-bake.override.hcl | 4 +++ 5 files changed, 81 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 6efeb0ccf..f0b157164 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -126,8 +126,57 @@ jobs: - name: Run tests run: cargo test + build-ci-base: + runs-on: ubuntu-22.04 + outputs: + output: ${{ steps.export_tag.outputs.image_tag }} + steps: + - uses: 
actions/checkout@v4
+        with:
+          submodules: recursive
+
+      - name: Docker meta
+        id: docker_meta
+        uses: docker/metadata-action@v5
+        with:
+          images: |
+            name=ghcr.io/cartesi/rollups-node-ci-base
+          tags: |
+            type=semver,pattern={{version}}
+            type=ref,event=branch
+            type=ref,event=pr
+
+      - name: Login to GitHub Container Registry
+        uses: docker/login-action@v3
+        with:
+          registry: ghcr.io
+          username: ${{ github.actor }}
+          password: ${{ secrets.GITHUB_TOKEN }}
+
+      - uses: depot/setup-action@v1
+      - name: Build and push docker image
+        id: docker_build
+        uses: depot/bake-action@v1
+        with:
+          files: |
+            ./docker-bake.hcl
+            ${{ steps.docker_meta.outputs.bake-file }}
+            ./docker-bake.platforms.hcl
+          targets: rollups-node-ci-base
+          push: true
+          project: ${{ vars.DEPOT_PROJECT }}
+          workdir: build
+
+      - name: Export Image Tag
+        id: export_tag
+        run: echo "image_tag=${{steps.docker_meta.outputs.version}}" >> "$GITHUB_OUTPUT"
+
   test-go:
     runs-on: ubuntu-22.04
+    container:
+      image: ghcr.io/cartesi/rollups-node-ci-base:${{needs.build-ci-base.outputs.output}}
+    needs:
+      - build-ci-base
     steps:
       - uses: actions/checkout@v4
         with:
@@ -153,6 +202,9 @@
       with:
         go-version-file: 'go.mod'

+    - name: Fix VCS Go Linter Issue
+      run: git config --global --add safe.directory /__w/rollups-node/rollups-node
+
     - name: Run Go Linter
       uses: golangci/golangci-lint-action@v6
       with:
diff --git a/.github/workflows/clean-up-images.yml b/.github/workflows/clean-up-images.yml
index d0bed5d4e..d3d030c78 100644
--- a/.github/workflows/clean-up-images.yml
+++ b/.github/workflows/clean-up-images.yml
@@ -17,6 +17,7 @@ jobs:
       matrix:
         image:
           - rollups-node
+          - rollups-node-ci-base
     steps:
       - uses: vlaurin/action-ghcr-prune@v0.6.0
         with:
diff --git a/build/Dockerfile b/build/Dockerfile
index 0c33b1b31..23b8e4e3a 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -331,3 +331,20 @@ USER cartesi

 # Set the Go supervisor as the command.
 CMD [ "cartesi-rollups-node" ]
+
+
+# STAGE: rollups-node-ci-base
+#
+# This stage prepares the base CI image
+FROM ${BASE_IMAGE} as rollups-node-ci-base
+
+# Install Git and Docker
+RUN <<EOF
+apt-get update
+apt-get install -y --no-install-recommends ca-certificates curl git gnupg
+install -m 0755 -d /etc/apt/keyrings
+curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o /etc/apt/keyrings/docker.gpg
+echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://download.docker.com/linux/debian $(. /etc/os-release && echo "$VERSION_CODENAME") stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
+apt-get update
+apt-get install -y --no-install-recommends docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin
+EOF
\ No newline at end of file
diff --git a/build/docker-bake.hcl b/build/docker-bake.hcl
index 9eb669ff0..97d82d813 100644
--- a/build/docker-bake.hcl
+++ b/build/docker-bake.hcl
@@ -45,3 +45,10 @@ target "rollups-node-devnet" {
   inherits = ["common"]
   target = "rollups-node-devnet"
 }
+
+target "rollups-node-ci-base" {
+  inherits = ["common"]
+  target = "rollups-node-ci-base"
+  dockerfile = "./Dockerfile"
+  context = "."
+} diff --git a/build/docker-bake.override.hcl b/build/docker-bake.override.hcl index fe1b35a4c..d2204f478 100644 --- a/build/docker-bake.override.hcl +++ b/build/docker-bake.override.hcl @@ -20,3 +20,7 @@ target "rollups-node-snapshot" { target "rollups-node-devnet" { tags = ["${DOCKER_ORGANIZATION}/rollups-node-devnet:${TAG}"] } + +target "rollups-node-ci-base" { + tags = ["${DOCKER_ORGANIZATION}/rollups-node-ci-base:${TAG}"] +} From c7193114c7ef6fe254453d96b735a8b4de351a89 Mon Sep 17 00:00:00 2001 From: Francisco Moura Date: Mon, 20 May 2024 19:54:15 -0300 Subject: [PATCH 10/34] feat: Add --no-mining opt to Devnet --- cmd/cartesi-rollups-cli/root/deps/deps.go | 6 ++- cmd/cartesi-rollups-cli/root/mine/mine.go | 41 +++++++++++++++++++ cmd/cartesi-rollups-cli/root/root.go | 2 + docs/cli/cartesi-rollups-cli.md | 1 + docs/cli/cartesi-rollups-cli_mine.md | 26 ++++++++++++ docs/cli/cartesi-rollups-cli_run-deps.md | 1 + internal/deps/deps.go | 48 +++++++++++++++-------- internal/node/machinehash_test.go | 4 +- pkg/ethutil/ethutil.go | 26 +++++++++++- pkg/ethutil/ethutil_test.go | 28 ++++++++----- test/echo_test.go | 2 +- 11 files changed, 153 insertions(+), 32 deletions(-) create mode 100644 cmd/cartesi-rollups-cli/root/mine/mine.go create mode 100644 docs/cli/cartesi-rollups-cli_mine.md diff --git a/cmd/cartesi-rollups-cli/root/deps/deps.go b/cmd/cartesi-rollups-cli/root/deps/deps.go index e262bab80..dfb2917a4 100644 --- a/cmd/cartesi-rollups-cli/root/deps/deps.go +++ b/cmd/cartesi-rollups-cli/root/deps/deps.go @@ -51,9 +51,13 @@ func init() { "Devnet local listening port number") Cmd.Flags().StringVar(&depsConfig.Devnet.BlockTime, "devnet-block-time", - deps.DefaultBlockTime, + deps.DefaultDevnetBlockTime, "Devnet mining block time") + Cmd.Flags().BoolVar(&depsConfig.Devnet.NoMining, "devnet-no-mining", + deps.DefaultDevnetNoMining, + "Devnet disable mining") + Cmd.Flags().BoolVarP(&verbose, "verbose", "v", false, "verbose logs") } diff --git a/cmd/cartesi-rollups-cli/root/mine/mine.go b/cmd/cartesi-rollups-cli/root/mine/mine.go new file mode 100644 index 000000000..2c1969544 --- /dev/null +++ b/cmd/cartesi-rollups-cli/root/mine/mine.go @@ -0,0 +1,41 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package mine + +import ( + "context" + "log/slog" + + "github.com/cartesi/rollups-node/pkg/ethutil" + "github.com/spf13/cobra" +) + +var Cmd = &cobra.Command{ + Use: "mine", + Short: "Mine a new block", + Example: examples, + Run: run, +} + +const examples = `# Mine a new block: +cartesi-rollups-cli mine` + +var ( + anvilEndpoint string +) + +func init() { + + Cmd.Flags().StringVar(&anvilEndpoint, "anvil-endpoint", "http://localhost:8545", + "address of anvil endpoint to be used to send the mining request") +} + +func run(cmd *cobra.Command, args []string) { + + blockNumber, err := ethutil.MineNewBlock(context.Background(), anvilEndpoint) + + cobra.CheckErr(err) + + slog.Info("Ok", "block number", blockNumber) +} diff --git a/cmd/cartesi-rollups-cli/root/root.go b/cmd/cartesi-rollups-cli/root/root.go index 6d4c63678..7a612fc69 100644 --- a/cmd/cartesi-rollups-cli/root/root.go +++ b/cmd/cartesi-rollups-cli/root/root.go @@ -8,6 +8,7 @@ import ( "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/execute" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/increasetime" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/inspect" + "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/mine" 
"github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/read" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/savesnapshot" "github.com/cartesi/rollups-node/cmd/cartesi-rollups-cli/root/send" @@ -31,5 +32,6 @@ func init() { Cmd.AddCommand(validate.Cmd) Cmd.AddCommand(deps.Cmd) Cmd.AddCommand(execute.Cmd) + Cmd.AddCommand(mine.Cmd) Cmd.DisableAutoGenTag = true } diff --git a/docs/cli/cartesi-rollups-cli.md b/docs/cli/cartesi-rollups-cli.md index 77ac81ca6..b70bba14a 100644 --- a/docs/cli/cartesi-rollups-cli.md +++ b/docs/cli/cartesi-rollups-cli.md @@ -18,6 +18,7 @@ Cartesi Rollups node. * [cartesi-rollups-cli execute](cartesi-rollups-cli_execute.md) - Executes a voucher * [cartesi-rollups-cli increase-time](cartesi-rollups-cli_increase-time.md) - Increases evm time of the current machine * [cartesi-rollups-cli inspect](cartesi-rollups-cli_inspect.md) - Calls inspect API +* [cartesi-rollups-cli mine](cartesi-rollups-cli_mine.md) - Mine a new block * [cartesi-rollups-cli read](cartesi-rollups-cli_read.md) - Read the node state from the GraphQL API * [cartesi-rollups-cli run-deps](cartesi-rollups-cli_run-deps.md) - Run node dependencies with Docker * [cartesi-rollups-cli save-snapshot](cartesi-rollups-cli_save-snapshot.md) - Saves the testing Cartesi machine snapshot to the designated folder diff --git a/docs/cli/cartesi-rollups-cli_mine.md b/docs/cli/cartesi-rollups-cli_mine.md new file mode 100644 index 000000000..3995ca3f6 --- /dev/null +++ b/docs/cli/cartesi-rollups-cli_mine.md @@ -0,0 +1,26 @@ +## cartesi-rollups-cli mine + +Mine a new block + +``` +cartesi-rollups-cli mine [flags] +``` + +### Examples + +``` +# Mine a new block: +cartesi-rollups-cli mine +``` + +### Options + +``` + --anvil-endpoint string address of anvil endpoint to be used to send the mining request (default "http://localhost:8545") + -h, --help help for mine +``` + +### SEE ALSO + +* [cartesi-rollups-cli](cartesi-rollups-cli.md) - Command line interface for Cartesi Rollups + diff --git a/docs/cli/cartesi-rollups-cli_run-deps.md b/docs/cli/cartesi-rollups-cli_run-deps.md index 333808433..fca0efb30 100644 --- a/docs/cli/cartesi-rollups-cli_run-deps.md +++ b/docs/cli/cartesi-rollups-cli_run-deps.md @@ -19,6 +19,7 @@ cartesi-rollups-cli run-deps --devnet-block-time string Devnet mining block time (default "1") --devnet-docker-image string Devnet docker image name (default "cartesi/rollups-node-devnet:devel") --devnet-mapped-port string Devnet local listening port number (default "8545") + --devnet-no-mining Devnet disable mining -h, --help help for run-deps --postgres-docker-image string Postgress docker image name (default "postgres:16-alpine") --postgres-mapped-port string Postgres local listening port number (default "5432") diff --git a/internal/deps/deps.go b/internal/deps/deps.go index 194cb49dd..7742fb830 100644 --- a/internal/deps/deps.go +++ b/internal/deps/deps.go @@ -18,15 +18,16 @@ import ( ) const ( - DefaultPostgresDatabase = "postgres" - DefaultPostgresDockerImage = "postgres:16-alpine" - DefaultPostgresPort = "5432" - DefaultPostgresUser = "postgres" - DefaultPostgresPassword = "password" - DefaultDevnetDockerImage = "cartesi/rollups-node-devnet:devel" - DefaultDevnetPort = "8545" - DefaultBlockTime = "1" - DefaultBlockToWaitForOnStartup = "21" + DefaultPostgresDatabase = "postgres" + DefaultPostgresDockerImage = "postgres:16-alpine" + DefaultPostgresPort = "5432" + DefaultPostgresUser = "postgres" + DefaultPostgresPassword = "password" + DefaultDevnetDockerImage = 
"cartesi/rollups-node-devnet:devel" + DefaultDevnetPort = "8545" + DefaultDevnetBlockTime = "1" + DefaultDevnetBlockToWaitForOnStartup = "21" + DefaultDevnetNoMining = false numPostgresCheckReadyAttempts = 2 pollInterval = 5 * time.Second @@ -54,6 +55,7 @@ type DevnetConfig struct { Port string BlockTime string BlockToWaitForOnStartup string + NoMining bool } // Builds a DepsConfig struct with default values @@ -67,8 +69,9 @@ func NewDefaultDepsConfig() *DepsConfig { &DevnetConfig{ DefaultDevnetDockerImage, DefaultDevnetPort, - DefaultBlockTime, - DefaultBlockToWaitForOnStartup, + DefaultDevnetBlockTime, + DefaultDevnetBlockToWaitForOnStartup, + DefaultDevnetNoMining, }, } } @@ -206,12 +209,25 @@ func Run(ctx context.Context, depsConfig DepsConfig) (*DepsContainers, error) { devnetExposedPort = strings.Join([]string{ depsConfig.Devnet.Port, ":", devnetExposedPort}, "") } + cmd := []string{ + "anvil", + "--load-state", + "/usr/share/devnet/anvil_state.json", + } + var waitStrategy *wait.LogStrategy + if depsConfig.Devnet.NoMining { + cmd = append(cmd, "--no-mining") + waitStrategy = wait.ForLog("net_listening") + } else { + cmd = append(cmd, "--block-time", + depsConfig.Devnet.BlockTime) + waitStrategy = wait.ForLog("Block Number: " + depsConfig.Devnet.BlockToWaitForOnStartup) + } devNetReq := testcontainers.ContainerRequest{ - Image: depsConfig.Devnet.DockerImage, - ExposedPorts: []string{devnetExposedPort}, - WaitingFor: wait.ForLog("Block Number: " + depsConfig.Devnet.BlockToWaitForOnStartup), - Cmd: []string{"anvil", "--block-time", - depsConfig.Devnet.BlockTime, "--load-state", "/usr/share/devnet/anvil_state.json"}, + Image: depsConfig.Devnet.DockerImage, + ExposedPorts: []string{devnetExposedPort}, + WaitingFor: waitStrategy, + Cmd: cmd, LifecycleHooks: createHook(&finishedWaitGroup), } devnet, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ diff --git a/internal/node/machinehash_test.go b/internal/node/machinehash_test.go index ff2d13cf3..c1aefc8fb 100644 --- a/internal/node/machinehash_test.go +++ b/internal/node/machinehash_test.go @@ -138,8 +138,8 @@ func startDevnet() (*deps.DepsContainers, error) { container, err := deps.Run(context.Background(), deps.DepsConfig{ Devnet: &deps.DevnetConfig{ DockerImage: deps.DefaultDevnetDockerImage, - BlockTime: deps.DefaultBlockTime, - BlockToWaitForOnStartup: deps.DefaultBlockToWaitForOnStartup, + BlockTime: deps.DefaultDevnetBlockTime, + BlockToWaitForOnStartup: deps.DefaultDevnetBlockToWaitForOnStartup, Port: testutil.GetCartesiTestDepsPortRange(), }, }) diff --git a/pkg/ethutil/ethutil.go b/pkg/ethutil/ethutil.go index 1ec16f1a1..75e70cdcc 100644 --- a/pkg/ethutil/ethutil.go +++ b/pkg/ethutil/ethutil.go @@ -199,14 +199,36 @@ func AdvanceDevnetTime(ctx context.Context, // Sets the timestamp for the next block at Devnet func SetNextDevnetBlockTimestamp( ctx context.Context, - blockchainHttpEnpoint string, + blockchainHttpEndpoint string, timestamp int64, ) error { - client, err := rpc.DialContext(ctx, blockchainHttpEnpoint) + client, err := rpc.DialContext(ctx, blockchainHttpEndpoint) if err != nil { return err } defer client.Close() return client.CallContext(ctx, nil, "evm_setNextBlockTimestamp", timestamp) } + +// Mines a new block +func MineNewBlock( + ctx context.Context, + blockchainHttpEndpoint string, +) (uint64, error) { + client, err := rpc.DialContext(ctx, blockchainHttpEndpoint) + if err != nil { + return 0, err + } + defer client.Close() + err = client.CallContext(ctx, nil, "evm_mine") + if err 
!= nil {
+		return 0, err
+	}
+	ethClient, err := ethclient.DialContext(ctx, blockchainHttpEndpoint)
+	if err != nil {
+		return 0, err
+	}
+	defer ethClient.Close()
+	return ethClient.BlockNumber(ctx)
+}
diff --git a/pkg/ethutil/ethutil_test.go b/pkg/ethutil/ethutil_test.go
index f06a62b3f..84f125e20 100644
--- a/pkg/ethutil/ethutil_test.go
+++ b/pkg/ethutil/ethutil_test.go
@@ -24,12 +24,13 @@ const testTimeout = 300 * time.Second
 // go-ethereum's client.
 type EthUtilSuite struct {
 	suite.Suite
-	ctx context.Context
-	cancel context.CancelFunc
-	deps *deps.DepsContainers
-	client *ethclient.Client
-	signer Signer
-	book *addresses.Book
+	ctx context.Context
+	cancel context.CancelFunc
+	deps *deps.DepsContainers
+	client *ethclient.Client
+	endpoint string
+	signer Signer
+	book *addresses.Book
 }
 
 func (s *EthUtilSuite) SetupTest() {
@@ -39,10 +40,10 @@ func (s *EthUtilSuite) SetupTest() {
 	s.deps, err = newDevNetContainer(context.Background())
 	s.Require().Nil(err)
 
-	endpoint, err := s.deps.DevnetEndpoint(s.ctx, "ws")
+	s.endpoint, err = s.deps.DevnetEndpoint(s.ctx, "ws")
 	s.Require().Nil(err)
 
-	s.client, err = ethclient.DialContext(s.ctx, endpoint)
+	s.client, err = ethclient.DialContext(s.ctx, s.endpoint)
 	s.Require().Nil(err)
 
 	s.signer, err = NewMnemonicSigner(s.ctx, s.client, FoundryMnemonic, 0)
@@ -83,6 +84,13 @@ func (s *EthUtilSuite) TestAddInput() {
 	s.Require().Equal(payload, inputArgs["payload"])
 }
 
+func (s *EthUtilSuite) TestMineNewBlock() {
+	blockNumber, err := MineNewBlock(s.ctx, s.endpoint)
+	s.Require().Nil(err)
+	s.Require().Equal(uint64(22), blockNumber)
+
+}
+
 // Log the output of the given container
 func (s *EthUtilSuite) logDevnetOutput() {
 	reader, err := s.deps.DevnetLogs(s.ctx)
@@ -105,8 +113,8 @@ func newDevNetContainer(ctx context.Context) (*deps.DepsContainers, error) {
 	container, err := deps.Run(ctx, deps.DepsConfig{
 		Devnet: &deps.DevnetConfig{
 			DockerImage: deps.DefaultDevnetDockerImage,
-			BlockTime: deps.DefaultBlockTime,
-			BlockToWaitForOnStartup: deps.DefaultBlockToWaitForOnStartup,
+			BlockTime: deps.DefaultDevnetBlockTime,
+			BlockToWaitForOnStartup: deps.DefaultDevnetBlockToWaitForOnStartup,
 			Port: testutil.GetCartesiTestDepsPortRange(),
 		},
 	})
diff --git a/test/echo_test.go b/test/echo_test.go
index dfaea7cd9..7b238bb2a 100644
--- a/test/echo_test.go
+++ b/test/echo_test.go
@@ -71,7 +71,7 @@ func (s *EchoInputTestSuite) SetupTest() {
 			DockerImage: deps.DefaultDevnetDockerImage,
 			Port: testutil.GetCartesiTestDepsPortRange(),
 			BlockTime: devNetMiningBlockTimeInSeconds,
-			BlockToWaitForOnStartup: deps.DefaultBlockToWaitForOnStartup,
+			BlockToWaitForOnStartup: deps.DefaultDevnetBlockToWaitForOnStartup,
 		},
 	}
 
From 60765d95bc6f6c645f8f32074650711fb1a0316d Mon Sep 17 00:00:00 2001
From: Gustavo Madeira Krieger
Date: Fri, 3 May 2024 14:26:14 -0300
Subject: [PATCH 11/34] feat(graphql): add postgraphile to supervisor

---
 CHANGELOG.md | 6 +++++-
 build/Dockerfile | 6 +++++-
 internal/node/handlers.go | 18 ++++++++++++++++++
 internal/node/services.go | 23 +++++++++++++++++++++
 4 files changed, 51 insertions(+), 2 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2a6cc1354..cc2673268 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,10 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 
 ## [Unreleased]
 
+### Added
+
+- Added PostGraphile service
+
 ### Changed
 
 - Bumped Rollups Contracts to 2.0
@@ -44,7 +48,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 - Added verification to ensure
`CARTESI_BLOCKCHAIN_ID` matches the id returned from the Ethereum node - Added verification to ensure the Cartesi Machine snapshot hash matches the template hash from the CartesiDApp contract - Added support for `CARTESI_AUTH_PRIVATE_KEY` and `CARTESI_AUTH_PRIVATE_KEY_FILE` -- Added `CARTESI_AUTH_KIND` environment variable to select the blockchain authetication method +- Added `CARTESI_AUTH_KIND` environment variable to select the blockchain authentication method - Added structured logging with slog. Colored logs can now be enabled with `CARTESI_LOG_PRETTY` environment variable ### Changed diff --git a/build/Dockerfile b/build/Dockerfile index 23b8e4e3a..a65da65e6 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -307,8 +307,12 @@ apt-get install -y --no-install-recommends \ libboost-filesystem1.81.0 \ libboost-log1.81.0 \ libcrypto++8 \ - procps + procps \ + nodejs \ + npm rm -rf /var/lib/apt/lists/* +npm install -g postgraphile +npm install -g --save @graphile-contrib/pg-simplify-inflector EOF # Copy Rust binaries. diff --git a/internal/node/handlers.go b/internal/node/handlers.go index cae5c545a..3fddbc8f7 100644 --- a/internal/node/handlers.go +++ b/internal/node/handlers.go @@ -4,8 +4,11 @@ package node import ( + "fmt" "log/slog" "net/http" + "net/http/httputil" + "net/url" "github.com/cartesi/rollups-node/internal/node/config" ) @@ -13,6 +16,11 @@ import ( func newHttpServiceHandler(c config.NodeConfig) http.Handler { handler := http.NewServeMux() handler.Handle("/healthz", http.HandlerFunc(healthcheckHandler)) + + graphqlProxy := newReverseProxy(c.HttpAddress, getPort(c, portOffsetPostgraphile)) + handler.Handle("/graphql", graphqlProxy) + handler.Handle("/graphiql", graphqlProxy) + return handler } @@ -20,3 +28,13 @@ func healthcheckHandler(w http.ResponseWriter, r *http.Request) { slog.Debug("Node received a healthcheck request") w.WriteHeader(http.StatusOK) } + +func newReverseProxy(address string, port int) *httputil.ReverseProxy { + urlStr := fmt.Sprintf("http://%v:%v/", address, port) + url, err := url.Parse(urlStr) + if err != nil { + panic(fmt.Sprintf("failed to parse url: %v", err)) + } + proxy := httputil.NewSingleHostReverseProxy(url) + return proxy +} diff --git a/internal/node/services.go b/internal/node/services.go index b769f508a..1a49b8277 100644 --- a/internal/node/services.go +++ b/internal/node/services.go @@ -19,6 +19,7 @@ const ( portOffsetProxy = iota portOffsetAuthorityClaimer portOffsetRedis + portOffsetPostgraphile ) const localhost = "127.0.0.1" @@ -123,6 +124,7 @@ func newSupervisorService(c config.NodeConfig, workDir string) services.Supervis } s = append(s, newHttpService(c)) + s = append(s, newPostgraphileService(c, workDir)) supervisor := services.SupervisorService{ Name: "rollups-node", @@ -140,3 +142,24 @@ func newHttpService(c config.NodeConfig) services.HttpService { Handler: handler, } } + +func newPostgraphileService(c config.NodeConfig, workDir string) services.CommandService { + var s services.CommandService + s.Name = "postgraphile" + s.HealthcheckPort = getPort(c, portOffsetPostgraphile) + s.Path = "postgraphile" + s.Args = append(s.Args, "--retry-on-init-fail") + s.Args = append(s.Args, "--dynamic-json") + s.Args = append(s.Args, "--no-setof-functions-contain-nulls") + s.Args = append(s.Args, "--no-ignore-rbac") + s.Args = append(s.Args, "--enable-query-batching") + s.Args = append(s.Args, "--extended-errors", "errcode") + s.Args = append(s.Args, "--append-plugins", "@graphile-contrib/pg-simplify-inflector") + s.Args = append(s.Args, 
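	// The flags appended above match PostGraphile's commonly recommended
	// CLI options (retry on init failure, dynamic JSON, strict RBAC, query
	// batching, extended error codes, simplified inflection); the ones
	// below, starting with --legacy-relations, shape the exposed schema and
	// wire the service to the node's database and reserved port. Flag roles
	// are paraphrased from general PostGraphile usage, not from this
	// repository.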
"--legacy-relations", "omit") + s.Args = append(s.Args, "--connection", fmt.Sprintf("%v", c.PostgresEndpoint.Value)) + s.Args = append(s.Args, "--schema", "public") + s.Args = append(s.Args, "--port", fmt.Sprint(getPort(c, portOffsetPostgraphile))) + s.Env = append(s.Env, os.Environ()...) + s.WorkDir = workDir + return s +} From b0929b10cf755ec2c7012b8877c250773180a42d Mon Sep 17 00:00:00 2001 From: Renan Santos Date: Tue, 21 May 2024 16:00:05 -0300 Subject: [PATCH 12/34] refactor: extract linewriter to package --- internal/{services => linewriter}/linewriter.go | 14 +++++++------- .../{services => linewriter}/linewriter_test.go | 6 +++--- internal/services/command.go | 6 ++++-- 3 files changed, 14 insertions(+), 12 deletions(-) rename internal/{services => linewriter}/linewriter.go (72%) rename internal/{services => linewriter}/linewriter_test.go (97%) diff --git a/internal/services/linewriter.go b/internal/linewriter/linewriter.go similarity index 72% rename from internal/services/linewriter.go rename to internal/linewriter/linewriter.go index 44dc3e482..6f610a676 100644 --- a/internal/services/linewriter.go +++ b/internal/linewriter/linewriter.go @@ -1,28 +1,28 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -package services +package linewriter import ( "bytes" "io" ) -// lineWriter accumulates the received data in a buffer and writes it to the inner writer when it +// LineWriter accumulates the received data in a buffer and writes it to the inner writer when it // encounters a new line, ignoring empty lines in the process. -// lineWriter assumes the inner writer does not returns an error. -type lineWriter struct { +// LineWriter assumes the inner writer does not returns an error. +type LineWriter struct { inner io.Writer buffer bytes.Buffer } -func newLineWriter(inner io.Writer) *lineWriter { - return &lineWriter{ +func New(inner io.Writer) *LineWriter { + return &LineWriter{ inner: inner, } } -func (w *lineWriter) Write(data []byte) (int, error) { +func (w *LineWriter) Write(data []byte) (int, error) { _, err := w.buffer.Write(data) if err != nil { // Not possible given bytes.Buffer spec diff --git a/internal/services/linewriter_test.go b/internal/linewriter/linewriter_test.go similarity index 97% rename from internal/services/linewriter_test.go rename to internal/linewriter/linewriter_test.go index a65481cc2..c87443068 100644 --- a/internal/services/linewriter_test.go +++ b/internal/linewriter/linewriter_test.go @@ -1,7 +1,7 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -package services +package linewriter import ( "bytes" @@ -24,7 +24,7 @@ func (w *mockWriter) Write(p []byte) (int, error) { type LineWriterSuite struct { suite.Suite mock *mockWriter - writer *lineWriter + writer *LineWriter } func TestLineWriterSuite(t *testing.T) { @@ -33,7 +33,7 @@ func TestLineWriterSuite(t *testing.T) { func (s *LineWriterSuite) SetupTest() { s.mock = &mockWriter{} - s.writer = newLineWriter(s.mock) + s.writer = New(s.mock) } func (s *LineWriterSuite) TestItWritesLines() { diff --git a/internal/services/command.go b/internal/services/command.go index 9eb9aa0d0..04ebba140 100644 --- a/internal/services/command.go +++ b/internal/services/command.go @@ -13,6 +13,8 @@ import ( "strings" "syscall" "time" + + "github.com/cartesi/rollups-node/internal/linewriter" ) const ( @@ -46,8 +48,8 @@ type CommandService struct { func (s CommandService) Start(ctx context.Context, ready chan<- 
struct{}) error { cmd := exec.CommandContext(ctx, s.Path, s.Args...) cmd.Env = s.Env - cmd.Stderr = newLineWriter(commandLogger{s.Name}) - cmd.Stdout = newLineWriter(commandLogger{s.Name}) + cmd.Stderr = linewriter.New(commandLogger{s.Name}) + cmd.Stdout = linewriter.New(commandLogger{s.Name}) cmd.Cancel = func() error { err := cmd.Process.Signal(syscall.SIGTERM) if err != nil { From b6cb7b00a3221a6c21e4e239529261e373522f83 Mon Sep 17 00:00:00 2001 From: Marcel Moura Date: Mon, 20 May 2024 11:02:36 -0300 Subject: [PATCH 13/34] chore: fix typos --- CHANGELOG.md | 2 +- internal/node/config/generate/main.go | 2 +- internal/services/command_test.go | 4 ++-- internal/services/fakeservice/main.go | 2 +- internal/services/http_test.go | 2 +- pkg/ethutil/ethutil.go | 8 ++++---- test/config.go | 10 +++++----- test/echo_test.go | 12 ++++++------ 8 files changed, 21 insertions(+), 21 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cc2673268..2a5d9a9ff 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -54,7 +54,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ### Changed - Changed `CARTESI_BLOCKCHAIN_ID` type from int to uint64 -- Changed `CARTESI_CONTRACTS_APPLICATION_DEPLOYMENT_BLOCK_NUMBER` type from string to int64. +- Changed `CARTESI_CONTRACTS_APPLICATION_DEPLOYMENT_BLOCK_NUMBER` type from string to int64 - Changed `CARTESI_LOG_LEVEL` option `warning` to `warn` - Bumped Cartesi Emulator SDK to 0.17.1 - Bumped Server Manager to 0.9.1 diff --git a/internal/node/config/generate/main.go b/internal/node/config/generate/main.go index c90fca4c9..6325bbdfa 100644 --- a/internal/node/config/generate/main.go +++ b/internal/node/config/generate/main.go @@ -8,7 +8,7 @@ // - a config.md file with documentation for the environment variables. // // Each table entry in the toml file translates into an environment variable. -// In Go, this becomes a map[string](map[string]Env), with the keys of the outter map being topic +// In Go, this becomes a map[string](map[string]Env), with the keys of the outer map being topic // names, and the keys of the inner map being variable names. 
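// For illustration only, a hypothetical entry in that map (the Env values
// are placeholders, not the generated output):
//
//	config := map[string]map[string]Env{
//		"logging": {"CARTESI_LOG_LEVEL": logLevelEnv},
//	}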
package main diff --git a/internal/services/command_test.go b/internal/services/command_test.go index e8e0c5eff..7079c1b2f 100644 --- a/internal/services/command_test.go +++ b/internal/services/command_test.go @@ -35,8 +35,8 @@ func (s *CommandServiceSuite) TearDownSuite() { func (s *CommandServiceSuite) SetupTest() { s.servicePort++ - serviceAdress := "0.0.0.0:" + fmt.Sprint(s.servicePort) - os.Setenv("SERVICE_ADDRESS", serviceAdress) + serviceAddress := "0.0.0.0:" + fmt.Sprint(s.servicePort) + os.Setenv("SERVICE_ADDRESS", serviceAddress) } func (s *CommandServiceSuite) TestItStopsWhenContextIsCancelled() { diff --git a/internal/services/fakeservice/main.go b/internal/services/fakeservice/main.go index ede94ad01..17b7ebeaa 100644 --- a/internal/services/fakeservice/main.go +++ b/internal/services/fakeservice/main.go @@ -1,7 +1,7 @@ // (c) Cartesi and individual authors (see AUTHORS) // SPDX-License-Identifier: Apache-2.0 (see LICENSE) -// This file creates a dummy webserver with the sole pupose of being used +// This file creates a dummy webserver with the sole purpose of being used // as a binary to test the services.Service struct package main diff --git a/internal/services/http_test.go b/internal/services/http_test.go index b8ef59fea..47840fdc1 100644 --- a/internal/services/http_test.go +++ b/internal/services/http_test.go @@ -109,7 +109,7 @@ func (s *HttpServiceSuite) TestItRespondsOngoingRequestsAfterContextIsClosed() { select { case <-ready: case <-time.After(DefaultServiceTimeout): - s.FailNow("timed out wating for HttpService to be ready") + s.FailNow("timed out waiting for HttpService to be ready") } clientResult := make(chan ClientResult, 1) diff --git a/pkg/ethutil/ethutil.go b/pkg/ethutil/ethutil.go index 75e70cdcc..442166ab0 100644 --- a/pkg/ethutil/ethutil.go +++ b/pkg/ethutil/ethutil.go @@ -66,12 +66,12 @@ func AddInput( // This function waits until the transaction is added to a block and return the input index. 
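// Usage sketch (endpoint and payload values are illustrative, borrowed from
// the test configuration elsewhere in this series):
//
//	index, err := AddInputUsingFoundryMnemonic(ctx, "http://localhost:8545", "0xdeadbeef")
//	if err != nil {
//		// handle the error
//	}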
func AddInputUsingFoundryMnemonic( ctx context.Context, - blockchainHttpEnpoint string, + blockchainHttpEndpoint string, payload string, ) (int, error) { // Send Input - client, err := ethclient.DialContext(ctx, blockchainHttpEnpoint) + client, err := ethclient.DialContext(ctx, blockchainHttpEndpoint) if err != nil { return 0, err } @@ -184,10 +184,10 @@ func ExecuteOutput( // Advances the Devnet timestamp func AdvanceDevnetTime(ctx context.Context, - blockchainHttpEnpoint string, + blockchainHttpEndpoint string, timeInSeconds int, ) error { - client, err := rpc.DialContext(ctx, blockchainHttpEnpoint) + client, err := rpc.DialContext(ctx, blockchainHttpEndpoint) if err != nil { return err } diff --git a/test/config.go b/test/config.go index 8f296f87f..b30e741a2 100644 --- a/test/config.go +++ b/test/config.go @@ -22,8 +22,8 @@ const ( LocalEpochLength = 5 ) -func NewLocalNodeConfig(localPostgresEnpoint string, localBlockchainHttpEndpoint string, - localBlockchainWsEnpoint string, snapshotDir string) config.NodeConfig { +func NewLocalNodeConfig(localPostgresEndpoint string, localBlockchainHttpEndpoint string, + localBlockchainWsEndpoint string, snapshotDir string) config.NodeConfig { var nodeConfig config.NodeConfig @@ -35,17 +35,17 @@ func NewLocalNodeConfig(localPostgresEnpoint string, localBlockchainHttpEndpoint //Postgres nodeConfig.PostgresEndpoint = - config.Redacted[string]{Value: localPostgresEnpoint} + config.Redacted[string]{Value: localPostgresEndpoint} //Epoch nodeConfig.RollupsEpochLength = LocalEpochLength - //Blochain + //Blockchain nodeConfig.BlockchainID = LocalBlockchainID nodeConfig.BlockchainHttpEndpoint = config.Redacted[string]{Value: localBlockchainHttpEndpoint} nodeConfig.BlockchainWsEndpoint = - config.Redacted[string]{Value: localBlockchainWsEnpoint} + config.Redacted[string]{Value: localBlockchainWsEndpoint} nodeConfig.BlockchainIsLegacy = false nodeConfig.BlockchainFinalityOffset = LocalFinalityOffset nodeConfig.BlockchainBlockTimeout = LocalBlockTimeout diff --git a/test/echo_test.go b/test/echo_test.go index 7b238bb2a..8c178e140 100644 --- a/test/echo_test.go +++ b/test/echo_test.go @@ -29,7 +29,7 @@ import ( const ( payload = "0xdeadbeef" maxReadInputAttempts = 10 - blockTimestampinSeconds = 7000000000 + blockTimestampInSeconds = 7000000000 testTimeout = 300 * time.Second devNetAdvanceTimeInSeconds = 120 devNetMiningBlockTimeInSeconds = "2" @@ -53,7 +53,7 @@ func (s *EchoInputTestSuite) SetupTest() { ctx, cancel := context.WithTimeout(context.Background(), testTimeout) - // Create Tempdir + // Create tempdir tempDir, err := os.MkdirTemp("", "echo-test") s.Require().Nil(err) snapshotDir := path.Join(tempDir, "machine-snapshot") @@ -79,10 +79,10 @@ func (s *EchoInputTestSuite) SetupTest() { s.Require().Nil(err) // Capture endpoints - postgressEndpoint, err := depsContainers.PostgresEndpoint(ctx, "postgres") + postgresEndpoint, err := depsContainers.PostgresEndpoint(ctx, "postgres") s.Require().Nil(err) - postgresUrl, err := url.Parse(postgressEndpoint) + postgresUrl, err := url.Parse(postgresEndpoint) s.Require().Nil(err) postgresUrl.User = url.UserPassword(deps.DefaultPostgresUser, deps.DefaultPostgresPassword) @@ -95,8 +95,8 @@ func (s *EchoInputTestSuite) SetupTest() { s.blockchainHttpEndpoint = devnetHttpEndpoint - // Fix the Blochain timestamp. Must be "in the future" - err = ethutil.SetNextDevnetBlockTimestamp(ctx, devnetHttpEndpoint, blockTimestampinSeconds) + // Fix the Blockchain timestamp. 
Must be "in the future"
+	err = ethutil.SetNextDevnetBlockTimestamp(ctx, devnetHttpEndpoint, blockTimestampInSeconds)
 	s.Require().Nil(err)
 
 	// Run Node Service
From 9a26ce6c0a12cddbbe0d3987da42e4327a9f637a Mon Sep 17 00:00:00 2001
From: Marcel Moura
Date: Wed, 12 Jun 2024 15:44:10 -0300
Subject: [PATCH 14/34] feat: normalize bool config parameters

Applied the ENABLED suffix to boolean parameters to match the node's
internal state, and set appropriate default values.
---
 CHANGELOG.md | 1 +
 cmd/cartesi-rollups-node/main.go | 2 +-
 docs/config.md | 30 ++++++------
 internal/node/config/config.go | 59 ++++++++++++-----------
 internal/node/config/generate/Config.toml | 18 +++----
 internal/node/config/generated.go | 48 +++++++++---------
 internal/node/node.go | 2 +-
 internal/node/services.go | 6 +--
 test/config.go | 8 +--
 9 files changed, 88 insertions(+), 86 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 2a5d9a9ff..dab80c828 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -15,6 +15,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0
 ### Changed
 
 - Bumped Rollups Contracts to 2.0
+- Normalized boolean configuration parameters (`CARTESI_LEGACY_BLOCKCHAIN_ENABLED`, `CARTESI_FEATURE_CLAIMER_ENABLED`, `CARTESI_FEATURE_MACHINE_HASH_CHECK_ENABLED`, `CARTESI_EXPERIMENTAL_SERVER_MANAGER_LOG_BYPASS_ENABLED` and `CARTESI_LOG_PRETTY_ENABLED`) and adjusted their logic accordingly
 
 ### Removed
 
diff --git a/cmd/cartesi-rollups-node/main.go b/cmd/cartesi-rollups-node/main.go
index d82391541..63a953cbe 100644
--- a/cmd/cartesi-rollups-node/main.go
+++ b/cmd/cartesi-rollups-node/main.go
@@ -35,7 +35,7 @@ func main() {
 	opts := &tint.Options{
 		Level: config.LogLevel,
 		AddSource: config.LogLevel == slog.LevelDebug,
-		NoColor: !config.LogPretty || !isatty.IsTerminal(os.Stdout.Fd()),
+		NoColor: !config.LogPrettyEnabled || !isatty.IsTerminal(os.Stdout.Fd()),
 		TimeFormat: "2006-01-02T15:04:05.000", // RFC3339 with milliseconds and without timezone
 	}
 	handler := tint.NewHandler(os.Stdout, opts)
diff --git a/docs/config.md b/docs/config.md
index 8abdf0592..333b9be9d 100644
--- a/docs/config.md
+++ b/docs/config.md
@@ -96,7 +96,13 @@ An unique identifier representing a blockchain network.
 
 * **Type:** `uint64`
 
-## `CARTESI_BLOCKCHAIN_IS_LEGACY`
+## `CARTESI_BLOCKCHAIN_WS_ENDPOINT`
+
+WebSocket endpoint for the blockchain RPC provider.
+
+* **Type:** `string`
+
+## `CARTESI_LEGACY_BLOCKCHAIN_ENABLED`
 
 If set to true the node will send transactions using the legacy gas fee model
 (instead of EIP-1559).
 
 * **Type:** `bool`
 * **Default:** `"false"`
 
-## `CARTESI_BLOCKCHAIN_WS_ENDPOINT`
-
-WebSocket endpoint for the blockchain RPC provider.
-
-* **Type:** `string`
-
 ## `CARTESI_CONTRACTS_APPLICATION_ADDRESS`
 
 Address of the DApp's contract.
@@ -135,7 +135,7 @@ The node will begin to read blockchain events from this block.
 
 * **Type:** `int64`
 
-## `CARTESI_EXPERIMENTAL_SERVER_MANAGER_BYPASS_LOG`
+## `CARTESI_EXPERIMENTAL_SERVER_MANAGER_LOG_BYPASS_ENABLED`
 
 When enabled, prints server-manager output to stdout and stderr directly.
 All other log configurations are ignored.
@@ -156,20 +156,20 @@ External Redis endpoint for the node when running in the experimental sunodo validator mode.
 
 * **Type:** `string`
 
-## `CARTESI_FEATURE_DISABLE_CLAIMER`
+## `CARTESI_FEATURE_CLAIMER_ENABLED`
 
-If set to true, the node will not make claims.
+If set to false, the node will not make claims.
* **Type:** `bool` -* **Default:** `"false"` +* **Default:** `"true"` -## `CARTESI_FEATURE_DISABLE_MACHINE_HASH_CHECK` +## `CARTESI_FEATURE_MACHINE_HASH_CHECK_ENABLED` -If set to true, the node will *not* check whether the Cartesi machine hash from +If set to false, the node will *not* check whether the Cartesi machine hash from the snapshot matches the hash in the Application contract. * **Type:** `bool` -* **Default:** `"false"` +* **Default:** `"true"` ## `CARTESI_HTTP_ADDRESS` @@ -193,7 +193,7 @@ One of "debug", "info", "warn", "error". * **Type:** `LogLevel` * **Default:** `"info"` -## `CARTESI_LOG_PRETTY` +## `CARTESI_LOG_PRETTY_ENABLED` If set to true, the node will add colors to its log output. diff --git a/internal/node/config/config.go b/internal/node/config/config.go index 5d224ef5b..57b90adfe 100644 --- a/internal/node/config/config.go +++ b/internal/node/config/config.go @@ -13,29 +13,29 @@ import ( // NodeConfig contains all the Node variables. // See the corresponding environment variable for the variable documentation. type NodeConfig struct { - LogLevel LogLevel - LogPretty bool - RollupsEpochLength uint64 - BlockchainID uint64 - BlockchainHttpEndpoint Redacted[string] - BlockchainWsEndpoint Redacted[string] - BlockchainIsLegacy bool - BlockchainFinalityOffset int - BlockchainBlockTimeout int - ContractsApplicationAddress string - ContractsIConsensusAddress string - ContractsInputBoxAddress string - ContractsInputBoxDeploymentBlockNumber int64 - SnapshotDir string - PostgresEndpoint Redacted[string] - HttpAddress string - HttpPort int - FeatureDisableClaimer bool - FeatureDisableMachineHashCheck bool - ExperimentalServerManagerBypassLog bool - ExperimentalSunodoValidatorEnabled bool - ExperimentalSunodoValidatorRedisEndpoint string - Auth Auth + LogLevel LogLevel + LogPrettyEnabled bool + RollupsEpochLength uint64 + BlockchainID uint64 + BlockchainHttpEndpoint Redacted[string] + BlockchainWsEndpoint Redacted[string] + LegacyBlockchainEnabled bool + BlockchainFinalityOffset int + BlockchainBlockTimeout int + ContractsApplicationAddress string + ContractsIConsensusAddress string + ContractsInputBoxAddress string + ContractsInputBoxDeploymentBlockNumber int64 + SnapshotDir string + PostgresEndpoint Redacted[string] + HttpAddress string + HttpPort int + FeatureClaimerEnabled bool + FeatureMachineHashCheckEnabled bool + ExperimentalServerManagerLogBypassEnabled bool + ExperimentalSunodoValidatorEnabled bool + ExperimentalSunodoValidatorRedisEndpoint string + Auth Auth } // Auth is used to sign transactions. 
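As an aside, the Redacted wrapper used by the endpoint fields above keeps secrets out of log output by implementing fmt.Stringer. A minimal sketch of the pattern, assuming a "[REDACTED]" placeholder (the actual String body is elided from this hunk):

    // Redacted hides its value when formatted with %v or %s.
    type Redacted[T any] struct {
    	Value T
    }

    func (r Redacted[T]) String() string {
    	return "[REDACTED]" // assumed placeholder text
    }

With this, fmt.Sprintf("%v", config.PostgresEndpoint) prints the placeholder instead of the connection string.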
@@ -71,12 +71,12 @@ func (r Redacted[T]) String() string { func FromEnv() NodeConfig { var config NodeConfig config.LogLevel = getLogLevel() - config.LogPretty = getLogPretty() + config.LogPrettyEnabled = getLogPrettyEnabled() config.RollupsEpochLength = getEpochLength() config.BlockchainID = getBlockchainId() config.BlockchainHttpEndpoint = Redacted[string]{getBlockchainHttpEndpoint()} config.BlockchainWsEndpoint = Redacted[string]{getBlockchainWsEndpoint()} - config.BlockchainIsLegacy = getBlockchainIsLegacy() + config.LegacyBlockchainEnabled = getLegacyBlockchainEnabled() config.BlockchainFinalityOffset = getBlockchainFinalityOffset() config.BlockchainBlockTimeout = getBlockchainBlockTimeout() config.ContractsApplicationAddress = getContractsApplicationAddress() @@ -87,15 +87,16 @@ func FromEnv() NodeConfig { config.PostgresEndpoint = Redacted[string]{getPostgresEndpoint()} config.HttpAddress = getHttpAddress() config.HttpPort = getHttpPort() - config.FeatureDisableClaimer = getFeatureDisableClaimer() - config.FeatureDisableMachineHashCheck = getFeatureDisableMachineHashCheck() - config.ExperimentalServerManagerBypassLog = getExperimentalServerManagerBypassLog() + config.FeatureClaimerEnabled = getFeatureClaimerEnabled() + config.FeatureMachineHashCheckEnabled = getFeatureMachineHashCheckEnabled() + config.ExperimentalServerManagerLogBypassEnabled = + getExperimentalServerManagerLogBypassEnabled() config.ExperimentalSunodoValidatorEnabled = getExperimentalSunodoValidatorEnabled() if getExperimentalSunodoValidatorEnabled() { config.ExperimentalSunodoValidatorRedisEndpoint = getExperimentalSunodoValidatorRedisEndpoint() } - if !getFeatureDisableClaimer() && !getExperimentalSunodoValidatorEnabled() { + if getFeatureClaimerEnabled() && !getExperimentalSunodoValidatorEnabled() { config.Auth = authFromEnv() } return config diff --git a/internal/node/config/generate/Config.toml b/internal/node/config/generate/Config.toml index e310c8f54..755d95523 100644 --- a/internal/node/config/generate/Config.toml +++ b/internal/node/config/generate/Config.toml @@ -9,7 +9,7 @@ go-type = "LogLevel" description = """ One of "debug", "info", "warn", "error".""" -[logging.CARTESI_LOG_PRETTY] +[logging.CARTESI_LOG_PRETTY_ENABLED] default = "false" go-type = "bool" description = """ @@ -19,17 +19,17 @@ If set to true, the node will add colors to its log output.""" # Features # -[features.CARTESI_FEATURE_DISABLE_CLAIMER] -default = "false" +[features.CARTESI_FEATURE_CLAIMER_ENABLED] +default = "true" go-type = "bool" description = """ -If set to true, the node will not make claims.""" +If set to false, the node will not make claims.""" -[features.CARTESI_FEATURE_DISABLE_MACHINE_HASH_CHECK] -default = "false" +[features.CARTESI_FEATURE_MACHINE_HASH_CHECK_ENABLED] +default = "true" go-type = "bool" description = """ -If set to true, the node will *not* check whether the Cartesi machine hash from +If set to false, the node will *not* check whether the Cartesi machine hash from the snapshot matches the hash in the Application contract.""" # @@ -63,7 +63,7 @@ go-type = "string" description = """ WebSocket endpoint for the blockchain RPC provider.""" -[blockchain.CARTESI_BLOCKCHAIN_IS_LEGACY] +[blockchain.CARTESI_LEGACY_BLOCKCHAIN_ENABLED] default = "false" go-type = "bool" description = """ @@ -218,7 +218,7 @@ go-type = "string" description = """ External Redis endpoint for the node when running in the experimental sunodo validator mode.""" -[experimental.CARTESI_EXPERIMENTAL_SERVER_MANAGER_BYPASS_LOG] 
+[experimental.CARTESI_EXPERIMENTAL_SERVER_MANAGER_LOG_BYPASS_ENABLED] default = "false" go-type = "bool" description = """ diff --git a/internal/node/config/generated.go b/internal/node/config/generated.go index 2f92f219e..851413543 100644 --- a/internal/node/config/generated.go +++ b/internal/node/config/generated.go @@ -245,26 +245,26 @@ func getBlockchainId() uint64 { return val } -func getBlockchainIsLegacy() bool { - s, ok := os.LookupEnv("CARTESI_BLOCKCHAIN_IS_LEGACY") +func getBlockchainWsEndpoint() string { + s, ok := os.LookupEnv("CARTESI_BLOCKCHAIN_WS_ENDPOINT") if !ok { - s = "false" + panic("missing env var CARTESI_BLOCKCHAIN_WS_ENDPOINT") } - val, err := toBool(s) + val, err := toString(s) if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_BLOCKCHAIN_IS_LEGACY: %v", err)) + panic(fmt.Sprintf("failed to parse CARTESI_BLOCKCHAIN_WS_ENDPOINT: %v", err)) } return val } -func getBlockchainWsEndpoint() string { - s, ok := os.LookupEnv("CARTESI_BLOCKCHAIN_WS_ENDPOINT") +func getLegacyBlockchainEnabled() bool { + s, ok := os.LookupEnv("CARTESI_LEGACY_BLOCKCHAIN_ENABLED") if !ok { - panic("missing env var CARTESI_BLOCKCHAIN_WS_ENDPOINT") + s = "false" } - val, err := toString(s) + val, err := toBool(s) if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_BLOCKCHAIN_WS_ENDPOINT: %v", err)) + panic(fmt.Sprintf("failed to parse CARTESI_LEGACY_BLOCKCHAIN_ENABLED: %v", err)) } return val } @@ -317,14 +317,14 @@ func getContractsInputBoxDeploymentBlockNumber() int64 { return val } -func getExperimentalServerManagerBypassLog() bool { - s, ok := os.LookupEnv("CARTESI_EXPERIMENTAL_SERVER_MANAGER_BYPASS_LOG") +func getExperimentalServerManagerLogBypassEnabled() bool { + s, ok := os.LookupEnv("CARTESI_EXPERIMENTAL_SERVER_MANAGER_LOG_BYPASS_ENABLED") if !ok { s = "false" } val, err := toBool(s) if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_EXPERIMENTAL_SERVER_MANAGER_BYPASS_LOG: %v", err)) + panic(fmt.Sprintf("failed to parse CARTESI_EXPERIMENTAL_SERVER_MANAGER_LOG_BYPASS_ENABLED: %v", err)) } return val } @@ -353,26 +353,26 @@ func getExperimentalSunodoValidatorRedisEndpoint() string { return val } -func getFeatureDisableClaimer() bool { - s, ok := os.LookupEnv("CARTESI_FEATURE_DISABLE_CLAIMER") +func getFeatureClaimerEnabled() bool { + s, ok := os.LookupEnv("CARTESI_FEATURE_CLAIMER_ENABLED") if !ok { - s = "false" + s = "true" } val, err := toBool(s) if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_FEATURE_DISABLE_CLAIMER: %v", err)) + panic(fmt.Sprintf("failed to parse CARTESI_FEATURE_CLAIMER_ENABLED: %v", err)) } return val } -func getFeatureDisableMachineHashCheck() bool { - s, ok := os.LookupEnv("CARTESI_FEATURE_DISABLE_MACHINE_HASH_CHECK") +func getFeatureMachineHashCheckEnabled() bool { + s, ok := os.LookupEnv("CARTESI_FEATURE_MACHINE_HASH_CHECK_ENABLED") if !ok { - s = "false" + s = "true" } val, err := toBool(s) if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_FEATURE_DISABLE_MACHINE_HASH_CHECK: %v", err)) + panic(fmt.Sprintf("failed to parse CARTESI_FEATURE_MACHINE_HASH_CHECK_ENABLED: %v", err)) } return val } @@ -413,14 +413,14 @@ func getLogLevel() LogLevel { return val } -func getLogPretty() bool { - s, ok := os.LookupEnv("CARTESI_LOG_PRETTY") +func getLogPrettyEnabled() bool { + s, ok := os.LookupEnv("CARTESI_LOG_PRETTY_ENABLED") if !ok { s = "false" } val, err := toBool(s) if err != nil { - panic(fmt.Sprintf("failed to parse CARTESI_LOG_PRETTY: %v", err)) + panic(fmt.Sprintf("failed to parse CARTESI_LOG_PRETTY_ENABLED: %v", 
err)) } return val } diff --git a/internal/node/node.go b/internal/node/node.go index 2ec878388..7cb6cbef2 100644 --- a/internal/node/node.go +++ b/internal/node/node.go @@ -18,7 +18,7 @@ func Setup(ctx context.Context, c config.NodeConfig, workDir string) (services.S return nil, err } - if !c.FeatureDisableMachineHashCheck { + if c.FeatureMachineHashCheckEnabled { if err := validateMachineHash( ctx, c.SnapshotDir, diff --git a/internal/node/services.go b/internal/node/services.go index 1a49b8277..c1baafeb1 100644 --- a/internal/node/services.go +++ b/internal/node/services.go @@ -66,7 +66,7 @@ func newAuthorityClaimer(c config.NodeConfig, workDir string) services.CommandSe s.Env = append(s.Env, fmt.Sprintf("TX_PROVIDER_HTTP_ENDPOINT=%v", c.BlockchainHttpEndpoint.Value)) s.Env = append(s.Env, fmt.Sprintf("TX_CHAIN_ID=%v", c.BlockchainID)) - s.Env = append(s.Env, fmt.Sprintf("TX_CHAIN_IS_LEGACY=%v", c.BlockchainIsLegacy)) + s.Env = append(s.Env, fmt.Sprintf("TX_CHAIN_IS_LEGACY=%v", c.LegacyBlockchainEnabled)) s.Env = append(s.Env, fmt.Sprintf("TX_DEFAULT_CONFIRMATIONS=%v", c.BlockchainFinalityOffset)) s.Env = append(s.Env, fmt.Sprintf("REDIS_ENDPOINT=%v", getRedisEndpoint(c))) @@ -118,8 +118,8 @@ func newSupervisorService(c config.NodeConfig, workDir string) services.Supervis s = append(s, newRedis(c, workDir)) } - // enable claimer if reader mode and sunodo validator mode are disabled - if !c.FeatureDisableClaimer && !c.ExperimentalSunodoValidatorEnabled { + // enable claimer if reader mode and sunodo validator mode are not enabled + if c.FeatureClaimerEnabled && !c.ExperimentalSunodoValidatorEnabled { s = append(s, newAuthorityClaimer(c, workDir)) } diff --git a/test/config.go b/test/config.go index b30e741a2..2698d2fb7 100644 --- a/test/config.go +++ b/test/config.go @@ -31,7 +31,7 @@ func NewLocalNodeConfig(localPostgresEndpoint string, localBlockchainHttpEndpoin //Log nodeConfig.LogLevel = slog.LevelInfo - nodeConfig.LogPretty = false + nodeConfig.LogPrettyEnabled = false //Postgres nodeConfig.PostgresEndpoint = @@ -46,7 +46,7 @@ func NewLocalNodeConfig(localPostgresEndpoint string, localBlockchainHttpEndpoin config.Redacted[string]{Value: localBlockchainHttpEndpoint} nodeConfig.BlockchainWsEndpoint = config.Redacted[string]{Value: localBlockchainWsEndpoint} - nodeConfig.BlockchainIsLegacy = false + nodeConfig.LegacyBlockchainEnabled = false nodeConfig.BlockchainFinalityOffset = LocalFinalityOffset nodeConfig.BlockchainBlockTimeout = LocalBlockTimeout @@ -61,8 +61,8 @@ func NewLocalNodeConfig(localPostgresEndpoint string, localBlockchainHttpEndpoin nodeConfig.HttpPort = LocalHttpPort //Features - nodeConfig.FeatureDisableClaimer = false - nodeConfig.FeatureDisableMachineHashCheck = false + nodeConfig.FeatureClaimerEnabled = true + nodeConfig.FeatureMachineHashCheckEnabled = true //Experimental nodeConfig.ExperimentalSunodoValidatorEnabled = false From be7d30750caa957f450c5a2462776b2c02ba4fc1 Mon Sep 17 00:00:00 2001 From: Renan Santos Date: Mon, 3 Jun 2024 12:51:17 -0300 Subject: [PATCH 15/34] fix: stop end-to-end testing --- Makefile | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 247ae1173..13267952a 100644 --- a/Makefile +++ b/Makefile @@ -6,10 +6,10 @@ submodules: ## Download the git submodules @git submodule update --init --recursive .PHONY: test -test: unit-test e2e-test ## Execute all tests +test: unit-test ## Execute all tests .PHONY: unit-test -unit-test:## Execute unit tests +unit-test: ## Execute unit tests @echo "Running unit 
tests" @go test ./... @@ -91,3 +91,4 @@ docker-clean: ## Remove the containers and volumes from previous compose run .PHONY: help help: ## Show help for each of the Makefile recipes @grep "##" $(MAKEFILE_LIST) | grep -v grep | sed -e 's/:.*##\(.*\)/:\n\t\1\n/' + From 42b47474397caffc9b44029cc73385e50bdb5f4d Mon Sep 17 00:00:00 2001 From: Renan Santos Date: Thu, 6 Jun 2024 14:38:04 -0300 Subject: [PATCH 16/34] chore: update machine version and devnet addresses --- build/compose-devnet.yaml | 2 +- build/docker-bake.hcl | 29 +++++++++++++++-------------- pkg/addresses/addresses.go | 2 +- setup_env.sh | 2 +- 4 files changed, 18 insertions(+), 17 deletions(-) diff --git a/build/compose-devnet.yaml b/build/compose-devnet.yaml index 80a6d9846..b3dbd5d31 100644 --- a/build/compose-devnet.yaml +++ b/build/compose-devnet.yaml @@ -19,7 +19,7 @@ services: CARTESI_BLOCKCHAIN_WS_ENDPOINT: "ws://devnet:8545" CARTESI_BLOCKCHAIN_IS_LEGACY: "false" CARTESI_BLOCKCHAIN_FINALITY_OFFSET: "1" - CARTESI_CONTRACTS_APPLICATION_ADDRESS: "0xb72c832dDeA10326143831F1E5F1646920C9c990" + CARTESI_CONTRACTS_APPLICATION_ADDRESS: "0x2E663fe9aE92275242406A185AA4fC8174339D3E" CARTESI_CONTRACTS_ICONSENSUS_ADDRESS: "0x77e5a5fb18F72b5106621f66C704c006c6dB4578" CARTESI_CONTRACTS_INPUT_BOX_ADDRESS: "0xA1b8EB1F13d8D5Db976a653BbDF8972cfD14691C" CARTESI_CONTRACTS_INPUT_BOX_DEPLOYMENT_BLOCK_NUMBER: "16" diff --git a/build/docker-bake.hcl b/build/docker-bake.hcl index 97d82d813..2c2025ca7 100644 --- a/build/docker-bake.hcl +++ b/build/docker-bake.hcl @@ -17,22 +17,23 @@ target "common" { dockerfile = "./build/Dockerfile" context = ".." args = { - BASE_IMAGE = "debian:bookworm-20240311-slim" - RUST_VERSION = "1.78.0" - GO_VERSION = "1.22.1" - FOUNDRY_NIGHTLY_VERSION = "293fad73670b7b59ca901c7f2105bf7a29165a90" - MACHINE_EMULATOR_VERSION = "0.16.1" - TOOLS_VERSION = "0.14.1" - LINUX_VERSION = "0.19.1" - LINUX_KERNEL_VERSION = "6.5.9-ctsi-1-v0.19.1" + BASE_IMAGE = "debian:bookworm-20240311-slim" + RUST_VERSION = "1.78.0" + GO_VERSION = "1.22.1" + FOUNDRY_NIGHTLY_VERSION = "293fad73670b7b59ca901c7f2105bf7a29165a90" + MACHINE_EMULATOR_VERSION = "0.17.0" + MACHINE_TOOLS_VERSION = "0.15.0" + MACHINE_IMAGE_KERNEL_VERSION = "0.20.0" + MACHINE_KERNEL_VERSION = "6.5.13" + MACHINE_XGENEXT2FS_VERSION = "1.5.6" } } target "rollups-node" { inherits = ["common"] target = "rollups-node" - args = { - ROLLUPS_NODE_VERSION = "devel" + args = { + ROLLUPS_NODE_VERSION = "devel" } } @@ -46,9 +47,9 @@ target "rollups-node-devnet" { target = "rollups-node-devnet" } -target "rollups-node-ci-base" { - inherits = ["common"] - target = "rollups-node-ci-base" +target "rollups-node-ci" { + inherits = ["common"] + target = "rollups-node-ci" dockerfile = "./Dockerfile" - context = "." + context = "." } diff --git a/pkg/addresses/addresses.go b/pkg/addresses/addresses.go index 8a4b710a2..466b63254 100644 --- a/pkg/addresses/addresses.go +++ b/pkg/addresses/addresses.go @@ -35,7 +35,7 @@ type Book struct { // Get the addresses for the test environment. 
func GetTestBook() *Book {
 	return &Book{
-		Application: common.HexToAddress("0xb72c832dDeA10326143831F1E5F1646920C9c990"),
+		Application: common.HexToAddress("0x2E663fe9aE92275242406A185AA4fC8174339D3E"),
 		ApplicationFactory: common.HexToAddress("0x39cc8d1faB70F713784032f166aB7Fe3B4801144"),
 		Authority: common.HexToAddress("0x77e5a5fb18F72b5106621f66C704c006c6dB4578"),
 		AuthorityFactory: common.HexToAddress("0x5EF4260c72a7A8df752AFF49aC46Ba741754E04a"),
diff --git a/setup_env.sh b/setup_env.sh
index 45c932e45..f56a34762 100644
--- a/setup_env.sh
+++ b/setup_env.sh
@@ -9,7 +9,7 @@ export CARTESI_BLOCKCHAIN_WS_ENDPOINT="ws://localhost:8545"
 export CARTESI_BLOCKCHAIN_IS_LEGACY="false"
 export CARTESI_BLOCKCHAIN_FINALITY_OFFSET="1"
 export CARTESI_BLOCKCHAIN_BLOCK_TIMEOUT="60"
-export CARTESI_CONTRACTS_APPLICATION_ADDRESS="0xb72c832dDeA10326143831F1E5F1646920C9c990"
+export CARTESI_CONTRACTS_APPLICATION_ADDRESS="0x2E663fe9aE92275242406A185AA4fC8174339D3E"
 export CARTESI_CONTRACTS_ICONSENSUS_ADDRESS="0x77e5a5fb18F72b5106621f66C704c006c6dB4578"
 export CARTESI_CONTRACTS_INPUT_BOX_ADDRESS="0xA1b8EB1F13d8D5Db976a653BbDF8972cfD14691C"
 export CARTESI_CONTRACTS_INPUT_BOX_DEPLOYMENT_BLOCK_NUMBER="16"
From 86d196d17215e44d4f247d3b7746d7d8a671b57b Mon Sep 17 00:00:00 2001
From: Renan Santos
Date: Thu, 6 Jun 2024 14:38:47 -0300
Subject: [PATCH 17/34] feat: update the Dockerfile with emulator dependencies

- Adds linux.bin and rootfs.ext2 download to the emulator stage.
- Adds an emulator-devel stage that installs libcmt and xgenext2fs.
- Configures the CI image to use CGO.
- Refactors the Dockerfile to improve readability.
---
 .github/workflows/build.yml | 6 +-
 .github/workflows/clean-up-images.yml | 2 +-
 build/Dockerfile | 457 +++++++++++++++++---------
 build/docker-bake.override.hcl | 4 +-
 internal/node/machinehash_test.go | 3 +-
 5 files changed, 306 insertions(+), 166 deletions(-)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index f0b157164..ba1b16077 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -140,7 +140,7 @@ jobs:
         uses: docker/metadata-action@v5
         with:
           images: |
-            name=ghcr.io/cartesi/rollups-node-ci-base
+            name=ghcr.io/cartesi/rollups-node-ci
           tags: |
             type=semver,pattern={{version}}
             type=ref,event=branch
@@ -162,7 +162,7 @@ jobs:
             ./docker-bake.hcl
             ${{ steps.docker_meta.outputs.bake-file }}
             ./docker-bake.platforms.hcl
-          targets: rollups-node-ci-base
+          targets: rollups-node-ci
           push: true
           project: ${{ vars.DEPOT_PROJECT }}
           workdir: build
@@ -174,7 +174,7 @@ jobs:
   test-go:
     runs-on: ubuntu-22.04
     container:
-      image: ghcr.io/cartesi/rollups-node-ci-base:${{needs.build-ci-base.outputs.output}}
+      image: ghcr.io/cartesi/rollups-node-ci:${{needs.build-ci-base.outputs.output}}
     needs:
       - build-ci-base
     steps:
diff --git a/.github/workflows/clean-up-images.yml b/.github/workflows/clean-up-images.yml
index d3d030c78..acccfeb41 100644
--- a/.github/workflows/clean-up-images.yml
+++ b/.github/workflows/clean-up-images.yml
@@ -17,7 +17,7 @@ jobs:
       matrix:
         image:
           - rollups-node
-          - rollups-node-ci-base
+          - rollups-node-ci
     steps:
       - uses: vlaurin/action-ghcr-prune@v0.6.0
         with:
diff --git a/build/Dockerfile b/build/Dockerfile
index a65da65e6..fde0a34d5 100644
--- a/build/Dockerfile
+++ b/build/Dockerfile
@@ -3,20 +3,18 @@
 
 # syntax=docker.io/docker/dockerfile:1
 
-# This dockerfile contains multiple stages to build three final targets.
-# The file was split in a section for each final target.
-
-# Version config that should be set in the bake file.
+# Version configuration that should be set in the bake file. ARG BASE_IMAGE ARG RUST_VERSION ARG GO_VERSION ARG FOUNDRY_NIGHTLY_VERSION -ARG MACHINE_EMULATOR_VERSION -ARG ROOTFS_VERSION -ARG LINUX_VERSION -ARG LINUX_KERNEL_VERSION ARG ROM_VERSION ARG ROLLUPS_NODE_VERSION +ARG MACHINE_EMULATOR_VERSION +ARG MACHINE_TOOLS_VERSION +ARG MACHINE_IMAGE_KERNEL_VERSION +ARG MACHINE_KERNEL_VERSION +ARG MACHINE_XGENEXT2FS_VERSION # Build directories. ARG SNAPSHOT_BUILD_PATH=/build/snapshot @@ -24,177 +22,323 @@ ARG DEVNET_BUILD_PATH=/build/devnet ARG RUST_BUILD_PATH=/build/rollups-node/rust ARG GO_BUILD_PATH=/build/rollups-node/go -# Runtime dir for the cartesi-machine snapshot. +# Runtime directory for the cartesi-machine snapshot. ARG SNAPSHOT_RUNTIME_PATH=/usr/share/cartesi/snapshot -#################################################################################################### -# STAGE: emulator-base +# ============================================================================= +# STAGE: emulator # -# This stage creates a base-image with the Cartesi machine emulator. -# The result is used as the base for the snapshot and the node targets. -# We do this instead of using the cartesi/machine-emulator image to have control over the distro -# used by the base image. -FROM ${BASE_IMAGE} as emulator-base +# - Install ca-certificates and curl (setup). +# - Install the machine-emulator. +# - Download linux.bin. +# - Download rootfs.ext2. +# +# NOTE: We do not use the cartesi/machine-emulator image to have control over +# the distro used by the base image. +# ============================================================================= + +FROM ${BASE_IMAGE} as emulator -# Install machine-emulator ARG MACHINE_EMULATOR_VERSION +ARG MACHINE_TOOLS_VERSION +ARG MACHINE_IMAGE_KERNEL_VERSION +ARG MACHINE_KERNEL_VERSION +ARG DEBIAN_FRONTEND=noninteractive + +# Install ca-certificates and curl (setup). +RUN < /dev/null + apt-get update + apt-get install -y --no-install-recommends \ + docker-ce \ + docker-ce-cli \ + containerd.io \ + docker-buildx-plugin \ + docker-compose-plugin + # Cartesi Machine Emulator + SDK_URL=https://github.com/cartesi/machine-emulator-sdk +EOF + +# ============================================================================= # STAGE: snapshot-builder # -# This stage builds the snapshot using the machine emulator as base image. -FROM emulator-base as snapshot-builder - -# Download rootfs and linux. -# Add these files to the directories the cartesi-machine expects. -WORKDIR /usr/share/cartesi-machine/images/ -ARG TOOLS_VERSION -ARG LINUX_VERSION -ARG LINUX_KERNEL_VERSION -ADD https://github.com/cartesi/machine-emulator-tools/releases/download/v${TOOLS_VERSION}/rootfs-tools-v${TOOLS_VERSION}.ext2 rootfs.ext2 -ADD https://github.com/cartesi/image-kernel/releases/download/v${LINUX_VERSION}/linux-${LINUX_KERNEL_VERSION}.bin linux.bin - -# Generate snapshot with echo and store it. +# - Build an echo snapshot. +# +# DEPRECATED: this stage is going to be deleted, as the CI won't be generating +# machine snapshots in the future. 
+# ============================================================================= + +FROM emulator as snapshot-builder + WORKDIR /build ARG SNAPSHOT_BUILD_PATH RUN cartesi-machine \ --ram-length=128Mi \ --store=$SNAPSHOT_BUILD_PATH \ + --no-rollback \ -- "ioctl-echo-loop --vouchers=1 --notices=1 --reports=1 --verbose=1" +# ============================================================================= # STAGE: rollups-node-snapshot # -# This stage copies the image from the builder. -# We use the emulator as base image so we can easily create a container with a volume shared with -# the rollups-node container. -FROM emulator-base as rollups-node-snapshot +# This stage copies the image from the snapshot-builder. +# We use the emulator as the base image so we can easily create a container +# that shares a volume with the rollups-node container. +# +# DEPRECATED: this stage is going to be deleted, as the CI won't be generating +# machine snapshots in the future. +# ============================================================================= + +FROM emulator as rollups-node-snapshot # Copy image from the builder stage. ARG SNAPSHOT_BUILD_PATH ARG SNAPSHOT_RUNTIME_PATH WORKDIR ${SNAPSHOT_RUNTIME_PATH} -COPY --from=snapshot-builder --chown=cartesi:cartesi ${SNAPSHOT_BUILD_PATH} ${SNAPSHOT_RUNTIME_PATH} +COPY --from=snapshot-builder --chown=cartesi:cartesi \ + ${SNAPSHOT_BUILD_PATH} ${SNAPSHOT_RUNTIME_PATH} # Set dummy command. CMD /bin/bash -#################################################################################################### -# TARGET: rollups-node-devnet -# -# This target contains the Ethereum node that rollups node uses for testing. -# This target requires the machine-snapshot built in the snapshot-builder stage. - +# ============================================================================= # STAGE: devnet-base # -# This stage installs Foundry. +# - Install ca-certificates, curl, and git (setup). +# - Install Foundry from downloaded pre-compiled binaries. +# ============================================================================= + FROM ${BASE_IMAGE} as devnet-base -# Install system dependencies. +# Install ca-certificates, curl, and git (setup). 
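# DEBIAN_FRONTEND=noninteractive suppresses interactive apt prompts while
# the image is being built.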
ARG DEBIAN_FRONTEND=noninteractive RUN < /dev/null - apt-get update - apt-get install -y --no-install-recommends docker-ce docker-ce-cli containerd.io docker-buildx-plugin docker-compose-plugin -EOF \ No newline at end of file diff --git a/build/docker-bake.override.hcl b/build/docker-bake.override.hcl index d2204f478..69177bd88 100644 --- a/build/docker-bake.override.hcl +++ b/build/docker-bake.override.hcl @@ -21,6 +21,6 @@ target "rollups-node-devnet" { tags = ["${DOCKER_ORGANIZATION}/rollups-node-devnet:${TAG}"] } -target "rollups-node-ci-base" { - tags = ["${DOCKER_ORGANIZATION}/rollups-node-ci-base:${TAG}"] +target "rollups-node-ci" { + tags = ["${DOCKER_ORGANIZATION}/rollups-node-ci:${TAG}"] } diff --git a/internal/node/machinehash_test.go b/internal/node/machinehash_test.go index c1aefc8fb..76dc1b24d 100644 --- a/internal/node/machinehash_test.go +++ b/internal/node/machinehash_test.go @@ -116,8 +116,7 @@ func mockMachineDir(hash string) (string, error) { return temp, nil } -// Generates a new Cartesi Machine snapshot in a temporary directory and returns -// its path +// Generates a new Cartesi Machine snapshot in a temporary directory and returns its path func createMachineSnapshot() (string, error) { tmpDir, err := os.MkdirTemp("", "") if err != nil { From acd8c26fac135d7b0470677d30b0b235902aa936 Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Fri, 14 Jun 2024 08:33:47 -0300 Subject: [PATCH 18/34] fix: update postgraphile command and lock version --- build/Dockerfile | 4 ++-- internal/node/services.go | 1 + 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/build/Dockerfile b/build/Dockerfile index fde0a34d5..f696b582e 100644 --- a/build/Dockerfile +++ b/build/Dockerfile @@ -468,8 +468,8 @@ RUN < Date: Thu, 13 Jun 2024 17:33:33 -0300 Subject: [PATCH 19/34] fix: adjust boolean env vars after normalization --- setup_env.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/setup_env.sh b/setup_env.sh index f56a34762..35478d5cc 100644 --- a/setup_env.sh +++ b/setup_env.sh @@ -1,12 +1,12 @@ +#!/usr/bin/env bash +# (c) Cartesi and individual authors (see AUTHORS) +# SPDX-License-Identifier: Apache-2.0 (see LICENSE) export CARTESI_LOG_LEVEL="info" -export CARTESI_LOG_PRETTY="true" -export CARTESI_FEATURE_DISABLE_CLAIMER="false" -export CARTESI_FEATURE_DISABLE_MACHINE_HASH_CHECK="false" +export CARTESI_LOG_PRETTY_ENABLED="true" export CARTESI_EPOCH_LENGTH="10" export CARTESI_BLOCKCHAIN_ID="31337" export CARTESI_BLOCKCHAIN_HTTP_ENDPOINT="http://localhost:8545" export CARTESI_BLOCKCHAIN_WS_ENDPOINT="ws://localhost:8545" -export CARTESI_BLOCKCHAIN_IS_LEGACY="false" export CARTESI_BLOCKCHAIN_FINALITY_OFFSET="1" export CARTESI_BLOCKCHAIN_BLOCK_TIMEOUT="60" export CARTESI_CONTRACTS_APPLICATION_ADDRESS="0x2E663fe9aE92275242406A185AA4fC8174339D3E" From e81cd0811c6fa22b58aefab0d573743d736cbc9f Mon Sep 17 00:00:00 2001 From: Marcel Moura <5615598+marcelstanley@users.noreply.github.com> Date: Thu, 11 Jul 2024 10:29:21 -0300 Subject: [PATCH 20/34] chore: fix file format --- test/config.go | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/test/config.go b/test/config.go index 2698d2fb7..520dcd866 100644 --- a/test/config.go +++ b/test/config.go @@ -13,13 +13,13 @@ import ( ) const ( - LocalBlockchainID = 31337 + LocalBlockchainID = 31337 LocalInputBoxDeploymentBlockNumber = 16 - LocalHttpAddress = "0.0.0.0" - LocalHttpPort = 10000 - LocalBlockTimeout = 120 - LocalFinalityOffset = 1 - LocalEpochLength = 5 + LocalHttpAddress = 
"0.0.0.0" + LocalHttpPort = 10000 + LocalBlockTimeout = 120 + LocalFinalityOffset = 1 + LocalEpochLength = 5 ) func NewLocalNodeConfig(localPostgresEndpoint string, localBlockchainHttpEndpoint string, From 00d84ea781fbd4b37be689efa48e0423aa6d6324 Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Mon, 8 Jul 2024 20:48:44 -0300 Subject: [PATCH 21/34] feat(data): add base repository migrations --- go.mod | 5 ++ go.sum | 11 +++ internal/repository/migrations.go | 38 ++++++++ ...ut_claim_output_report_nodeconfig.down.sql | 14 +++ ...nput_claim_output_report_nodeconfig.up.sql | 88 +++++++++++++++++++ 5 files changed, 156 insertions(+) create mode 100644 internal/repository/migrations.go create mode 100644 internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.down.sql create mode 100644 internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.up.sql diff --git a/go.mod b/go.mod index a66ac9a02..2209e463c 100644 --- a/go.mod +++ b/go.mod @@ -62,13 +62,17 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang-migrate/migrate/v4 v4.17.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect + github.com/hashicorp/errwrap v1.1.0 // indirect + github.com/hashicorp/go-multierror v1.1.1 // indirect github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.9 // indirect + github.com/lib/pq v1.10.9 // indirect github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae // indirect github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect @@ -101,6 +105,7 @@ require ( go.opentelemetry.io/otel v1.28.0 // indirect go.opentelemetry.io/otel/metric v1.28.0 // indirect go.opentelemetry.io/otel/trace v1.28.0 // indirect + go.uber.org/atomic v1.7.0 // indirect golang.org/x/crypto v0.24.0 // indirect golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect golang.org/x/mod v0.19.0 // indirect diff --git a/go.sum b/go.sum index d6de95f12..57f7cc444 100644 --- a/go.sum +++ b/go.sum @@ -144,6 +144,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-migrate/migrate/v4 v4.17.1 h1:4zQ6iqL6t6AiItphxJctQb3cFqWiSpMnX7wLTPnnYO4= +github.com/golang-migrate/migrate/v4 v4.17.1/go.mod h1:m8hinFyWBn0SA4QKHuKh175Pm9wjmxj3S2Mia7dbXzM= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk= @@ -159,8 +161,13 @@ github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aN github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod 
h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= +github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= +github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= +github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE= github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0= +github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= +github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4 h1:X4egAf/gcS1zATw6wn4Ej8vjuVGxeHdan+bRb2ebyv4= github.com/holiman/billy v0.0.0-20240216141850-2abb0c79d3c4/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc= github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao= @@ -190,6 +197,8 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c= github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lmittmann/tint v1.0.4 h1:LeYihpJ9hyGvE0w+K2okPTGUdVLfng1+nDNVR4vWISc= github.com/lmittmann/tint v1.0.4/go.mod h1:HIS3gSy7qNwGCj+5oRjAutErFBl4BzdQP6cJZ0NfMwE= github.com/lufia/plan9stats v0.0.0-20240513124658-fba389f38bae h1:dIZY4ULFcto4tAFlj1FYZl8ztUZ13bdq+PLY+NOfbyI= @@ -328,6 +337,8 @@ go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+ go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI= go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= +go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw= +go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= golang.org/x/crypto v0.0.0-20170613210332-850760c427c5/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= diff --git a/internal/repository/migrations.go b/internal/repository/migrations.go new file mode 100644 index 000000000..507696cbf --- /dev/null +++ b/internal/repository/migrations.go @@ -0,0 +1,38 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package repository + +import ( + "embed" + "errors" + "log/slog" + + "github.com/golang-migrate/migrate/v4" + _ "github.com/golang-migrate/migrate/v4/database/postgres" + _ "github.com/golang-migrate/migrate/v4/source/file" + "github.com/golang-migrate/migrate/v4/source/iofs" +) + +//go:embed migrations/* +var content embed.FS + +func RunMigrations(postgres_endpoint string) { + driver, err := iofs.New(content, "migrations") + if err != nil { + slog.Error("Unable to use embed files", + "error", err) + } + + migration, err := migrate.NewWithSourceInstance("iofs", driver, 
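	// postgres_endpoint is expected to be a connection URL accepted by
	// golang-migrate's postgres driver, e.g.
	// postgres://user:password@host:port/dbname (illustrative form; the
	// tests in this series assemble such a URL with url.UserPassword).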
+		postgresEndpoint)
+	if err != nil {
+		slog.Error("Unable to set up the migrations", "error", err)
+		return
+	}
+	if err := migration.Up(); err != nil {
+		if !errors.Is(err, migrate.ErrNoChange) {
+			slog.Error("Unable to run migrations",
+				"error", err)
+		}
+	}
+}
diff --git a/internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.down.sql b/internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.down.sql
new file mode 100644
index 000000000..0fba61bf5
--- /dev/null
+++ b/internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.down.sql
@@ -0,0 +1,14 @@
+-- (c) Cartesi and individual authors (see AUTHORS)
+-- SPDX-License-Identifier: Apache-2.0 (see LICENSE)
+
+DROP TABLE IF EXISTS "node_config";
+DROP TABLE IF EXISTS "report";
+DROP TABLE IF EXISTS "output";
+DROP TABLE IF EXISTS "claim";
+DROP TABLE IF EXISTS "input";
+DROP TABLE IF EXISTS "application";
+
+DROP TYPE IF EXISTS "InputCompletionStatus";
+DROP TYPE IF EXISTS "ClaimStatus";
+DROP TYPE IF EXISTS "ApplicationStatus";
+DROP TYPE IF EXISTS "DefaultBlock";
diff --git a/internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.up.sql b/internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.up.sql
new file mode 100644
index 000000000..849c0e786
--- /dev/null
+++ b/internal/repository/migrations/000001_create_application_input_claim_output_report_nodeconfig.up.sql
@@ -0,0 +1,88 @@
+-- (c) Cartesi and individual authors (see AUTHORS)
+-- SPDX-License-Identifier: Apache-2.0 (see LICENSE)
+
+CREATE TYPE "ApplicationStatus" AS ENUM ('RUNNING', 'NOT RUNNING');
+
+CREATE TYPE "InputCompletionStatus" AS ENUM ('NONE', 'ACCEPTED', 'REJECTED', 'EXCEPTION', 'MACHINE_HALTED', 'CYCLE_LIMIT_EXCEEDED', 'TIME_LIMIT_EXCEEDED', 'PAYLOAD_LENGTH_LIMIT_EXCEEDED');
+
+CREATE TYPE "ClaimStatus" AS ENUM ('PENDING', 'SUBMITTED', 'FINALIZED');
+
+CREATE TYPE "DefaultBlock" AS ENUM ('FINALIZED', 'LATEST', 'PENDING', 'SAFE');
+
+CREATE TABLE "application"
+(
+    "id" SERIAL,
+    "contract_address" BYTEA NOT NULL,
+    "template_hash" BYTEA NOT NULL,
+    "snapshot_uri" VARCHAR(4096) NOT NULL,
+    "last_processed_block" NUMERIC(20,0) NOT NULL,
+    "status" "ApplicationStatus" NOT NULL,
+    "epoch_length" INT NOT NULL,
+    CONSTRAINT "application_pkey" PRIMARY KEY ("id"),
+    UNIQUE("contract_address")
+);
+
+CREATE TABLE "input"
+(
+    "id" BIGSERIAL,
+    "index" NUMERIC(20,0) NOT NULL,
+    "raw_data" BYTEA NOT NULL,
+    "block_number" NUMERIC(20,0) NOT NULL,
+    "status" "InputCompletionStatus" NOT NULL,
+    "machine_hash" BYTEA,
+    "outputs_hash" BYTEA,
+    "application_address" BYTEA NOT NULL,
+    CONSTRAINT "input_pkey" PRIMARY KEY ("id"),
+    CONSTRAINT "input_application_address_fkey" FOREIGN KEY ("application_address") REFERENCES "application"("contract_address"),
+    UNIQUE("index", "application_address")
+);
+
+CREATE INDEX "input_idx" ON "input"("block_number");
+
+CREATE TABLE "claim"
+(
+    "id" BIGSERIAL,
+    "index" NUMERIC(20,0) NOT NULL,
+    "output_merkle_root_hash" BYTEA NOT NULL,
+    "transaction_hash" BYTEA,
+    "status" "ClaimStatus" NOT NULL,
+    "application_address" BYTEA NOT NULL,
+    CONSTRAINT "claim_pkey" PRIMARY KEY ("id"),
+    CONSTRAINT "claim_application_address_fkey" FOREIGN KEY ("application_address") REFERENCES "application"("contract_address"),
+    UNIQUE("index", "application_address")
+);
+
+CREATE TABLE "output"
+(
+    "id" BIGSERIAL,
+    "index" NUMERIC(20,0) NOT NULL,
+    "raw_data" BYTEA NOT NULL,
+    "hash"
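+    -- nullable: the output hash and its siblings are only filled in after the epoch is processed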
BYTEA, + "output_hashes_siblings" BYTEA[], + "input_id" BIGINT NOT NULL, + CONSTRAINT "output_pkey" PRIMARY KEY ("id"), + CONSTRAINT "output_input_id_fkey" FOREIGN KEY ("input_id") REFERENCES "input"("id") +); + +CREATE UNIQUE INDEX "output_idx" ON "output"("index"); + +CREATE TABLE "report" +( + "id" BIGSERIAL, + "index" NUMERIC(20,0) NOT NULL, + "raw_data" BYTEA NOT NULL, + "input_id" BIGINT NOT NULL, + CONSTRAINT "report_pkey" PRIMARY KEY ("id"), + CONSTRAINT "report_input_id_fkey" FOREIGN KEY ("input_id") REFERENCES "input"("id") +); + +CREATE UNIQUE INDEX "report_idx" ON "report"("index"); + +CREATE TABLE "node_config" +( + "default_block" "DefaultBlock" NOT NULL, + "input_box_deployment_block" INT NOT NULL, + "input_box_address" BYTEA NOT NULL, + "chain_id" INT NOT NULL, + "iconsensus_address" BYTEA NOT NULL +); From fbbce1b7d77ad23b1fe30eb8b0c4d5563d3deffa Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Mon, 8 Jul 2024 20:50:48 -0300 Subject: [PATCH 22/34] feat(node): update node models --- internal/node/model/models.go | 101 ++++++++++++++++++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 internal/node/model/models.go diff --git a/internal/node/model/models.go b/internal/node/model/models.go new file mode 100644 index 000000000..b70b6b1b2 --- /dev/null +++ b/internal/node/model/models.go @@ -0,0 +1,101 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package model + +import ( + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/common/hexutil" +) + +type ( + Hash = common.Hash + Address = common.Address + InputCompletionStatus string + ClaimStatus string + ApplicationStatus string + DefaultBlock string +) + +const ( + InputStatusNone InputCompletionStatus = "NONE" + InputStatusAccepted InputCompletionStatus = "ACCEPTED" + InputStatusRejected InputCompletionStatus = "REJECTED" + InputStatusException InputCompletionStatus = "EXCEPTION" + InputStatusMachineHalted InputCompletionStatus = "MACHINE_HALTED" + InputStatusCycleLimitExceeded InputCompletionStatus = "CYCLE_LIMIT_EXCEEDED" + InputStatusTimeLimitExceeded InputCompletionStatus = "TIME_LIMIT_EXCEEDED" + InputStatusPayloadLengthLimitExceeded InputCompletionStatus = "PAYLOAD_LENGTH_LIMIT_EXCEEDED" +) + +const ( + ClaimStatusPending ClaimStatus = "PENDING" + ClaimStatusSubmitted ClaimStatus = "SUBMITTED" + ClaimStatusFinalized ClaimStatus = "FINALIZED" +) + +const ( + ApplicationStatusRunning ApplicationStatus = "RUNNING" + ApplicationStatusNotRunning ApplicationStatus = "NOT RUNNING" +) + +const ( + DefaultBlockStatusLatest DefaultBlock = "LATEST" + DefaultBlockStatusFinalized DefaultBlock = "FINALIZED" + DefaultBlockStatusPending DefaultBlock = "PENDING" + DefaultBlockStatusSafe DefaultBlock = "SAFE" +) + +type NodePersistentConfig struct { + DefaultBlock DefaultBlock + InputBoxDeploymentBlock uint64 + InputBoxAddress Address + ChainId uint64 + IConsensusAddress Address +} + +type Application struct { + Id uint64 + ContractAddress Address + TemplateHash Hash + SnapshotURI string + LastProcessedBlock uint64 + EpochLength uint64 + Status ApplicationStatus +} + +type Input struct { + Id uint64 + Index uint64 + CompletionStatus InputCompletionStatus + RawData hexutil.Bytes + BlockNumber uint64 + MachineHash *Hash + OutputsHash *Hash + AppAddress Address +} + +type Output struct { + Id uint64 + Index uint64 + RawData hexutil.Bytes + Hash *Hash + OutputHashesSiblings []Hash + InputId uint64 +} + +type Report 
struct { + Id uint64 + Index uint64 + RawData hexutil.Bytes + InputId uint64 +} + +type Claim struct { + Id uint64 + Index uint64 + Status ClaimStatus + OutputMerkleRootHash Hash + TransactionHash *Hash + AppAddress Address +} From e6b03ec8ea3fd79ce556d47b0aaa58c98b22f8a4 Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Mon, 8 Jul 2024 21:03:34 -0300 Subject: [PATCH 23/34] feat(data): add base repository functions --- go.mod | 4 + go.sum | 8 + internal/repository/base.go | 606 ++++++++++++++++++++++++++++++++++++ 3 files changed, 618 insertions(+) create mode 100644 internal/repository/base.go diff --git a/go.mod b/go.mod index 2209e463c..88ee3bcc9 100644 --- a/go.mod +++ b/go.mod @@ -70,6 +70,10 @@ require ( github.com/holiman/uint256 v1.2.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/invopop/yaml v0.2.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect + github.com/jackc/pgx/v5 v5.6.0 // indirect + github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.9 // indirect github.com/lib/pq v1.10.9 // indirect diff --git a/go.sum b/go.sum index 57f7cc444..acf510c24 100644 --- a/go.sum +++ b/go.sum @@ -180,6 +180,14 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/invopop/yaml v0.2.0 h1:7zky/qH+O0DwAyoobXUqvVBwgBFRxKoQ/3FjcVpjTMY= github.com/invopop/yaml v0.2.0/go.mod h1:2XuRLgs/ouIrW3XNzuNj7J3Nvu/Dig5MXvbCEdiBN3Q= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a h1:bbPeKD0xmW/Y25WS6cokEszi5g+S0QxI/d45PkRi7Nk= +github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/puddle/v2 v2.2.1 h1:RhxXJtFG022u4ibrCSMSiu5aOq1i77R3OHKNJj77OAk= +github.com/jackc/puddle/v2 v2.2.1/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus= github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= diff --git a/internal/repository/base.go b/internal/repository/base.go new file mode 100644 index 000000000..c1997f4d5 --- /dev/null +++ b/internal/repository/base.go @@ -0,0 +1,606 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package repository + +import ( + "context" + "errors" + "fmt" + "log/slog" + "sync" + + . 
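+	// the dot-import below brings the model types (Hash, Address, Input, Output, ...) into scope unqualified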
"github.com/cartesi/rollups-node/internal/node/model" + "github.com/jackc/pgx/v5" + "github.com/jackc/pgx/v5/pgxpool" +) + +type database struct { + db *pgxpool.Pool +} + +var ErrInsertRow = errors.New("unable to insert row") + +func Connect( + ctx context.Context, + postgresEndpoint string, +) (*database, error) { + var ( + pgError error + pgInstance *database + pgOnce sync.Once + ) + + pgOnce.Do(func() { + dbpool, err := pgxpool.New(ctx, postgresEndpoint) + if err != nil { + pgError = fmt.Errorf("unable to create connection pool: %w\n", err) + } + + pgInstance = &database{dbpool} + }) + + return pgInstance, pgError +} + +func (pg *database) Close() { + if pg != nil { + pg.db.Close() + } +} + +func (pg *database) InsertNodeConfig( + ctx context.Context, + config *NodePersistentConfig, +) error { + query := ` + INSERT INTO node_config + (default_block, + input_box_deployment_block, + input_box_address, + chain_id, + iconsensus_address) + SELECT + @defaultBlock, + @deploymentBlock, + @inputBoxAddress, + @chainId, + @iConsensusAddress + WHERE NOT EXISTS (SELECT * FROM node_config)` + + args := pgx.NamedArgs{ + "defaultBlock": config.DefaultBlock, + "deploymentBlock": config.InputBoxDeploymentBlock, + "inputBoxAddress": config.InputBoxAddress, + "chainId": config.ChainId, + "iConsensusAddress": config.IConsensusAddress, + } + + _, err := pg.db.Exec(ctx, query, args) + if err != nil { + return fmt.Errorf("%w: %w", ErrInsertRow, err) + } + + return nil +} + +func (pg *database) InsertApplication( + ctx context.Context, + app *Application, +) error { + query := ` + INSERT INTO application + (contract_address, + template_hash, + snapshot_uri, + last_processed_block, + epoch_length, + status) + VALUES + (@contractAddress, + @templateHash, + @snapshotUri, + @lastProcessedBlock, + @epochLength, + @status)` + + args := pgx.NamedArgs{ + "contractAddress": app.ContractAddress, + "templateHash": app.TemplateHash, + "snapshotUri": app.SnapshotURI, + "lastProcessedBlock": app.LastProcessedBlock, + "epochLength": app.EpochLength, + "status": app.Status, + } + + _, err := pg.db.Exec(ctx, query, args) + if err != nil { + return fmt.Errorf("%w: %w", ErrInsertRow, err) + } + + return nil +} + +func (pg *database) InsertInput( + ctx context.Context, + input *Input, +) error { + query := ` + INSERT INTO input + (index, + status, + raw_data, + block_number, + machine_hash, + outputs_hash, + application_address) + VALUES + (@index, + @status, + @rawData, + @blockNumber, + @machineHash, + @outputsHash, + @applicationAddress)` + + args := pgx.NamedArgs{ + "index": input.Index, + "status": input.CompletionStatus, + "rawData": input.RawData, + "blockNumber": input.BlockNumber, + "machineHash": input.MachineHash, + "outputsHash": input.OutputsHash, + "applicationAddress": input.AppAddress, + } + + _, err := pg.db.Exec(ctx, query, args) + if err != nil { + return fmt.Errorf("%w: %w", ErrInsertRow, err) + } + + return nil +} + +func (pg *database) InsertOutput( + ctx context.Context, + output *Output, +) error { + query := ` + INSERT INTO output + (index, + raw_data, + output_hashes_siblings, + input_id) + VALUES + (@index, + @rawData, + @outputHashesSiblings, + @inputId)` + + args := pgx.NamedArgs{ + "inputId": output.InputId, + "index": output.Index, + "rawData": output.RawData, + "outputHashesSiblings": output.OutputHashesSiblings, + } + + _, err := pg.db.Exec(ctx, query, args) + if err != nil { + return fmt.Errorf("%w: %w", ErrInsertRow, err) + } + + return nil +} + +func (pg *database) InsertReport( + ctx 
context.Context, + report *Report, +) error { + query := ` + INSERT INTO report + (index, + raw_data, + input_id) + VALUES + (@index, + @rawData, + @inputId)` + + args := pgx.NamedArgs{ + "inputId": report.InputId, + "index": report.Index, + "rawData": report.RawData, + } + + _, err := pg.db.Exec(ctx, query, args) + if err != nil { + return fmt.Errorf("%w: %w", ErrInsertRow, err) + } + + return nil +} + +func (pg *database) InsertClaim( + ctx context.Context, + claim *Claim, +) error { + query := ` + INSERT INTO claim + (index, + output_merkle_root_hash, + transaction_hash, + status, + application_address) + VALUES + (@index, + @outputMerkleRootHash, + @transactionHash, + @status, + @applicationAddress)` + + args := pgx.NamedArgs{ + "index": claim.Index, + "outputMerkleRootHash": claim.OutputMerkleRootHash, + "transactionHash": claim.TransactionHash, + "status": claim.Status, + "applicationAddress": claim.AppAddress, + } + + _, err := pg.db.Exec(ctx, query, args) + if err != nil { + return fmt.Errorf("%w: %w", ErrInsertRow, err) + } + + return nil +} + +func (pg *database) GetNodeConfig( + ctx context.Context, +) (*NodePersistentConfig, error) { + var ( + defaultBlock DefaultBlock + deploymentBlock uint64 + inputBoxAddress Address + chainId uint64 + iConsensusAddress Address + ) + + query := ` + SELECT + default_block, + input_box_deployment_block, + input_box_address, + chain_id, + iconsensus_address + FROM + node_config` + + err := pg.db.QueryRow(ctx, query).Scan( + &defaultBlock, + &deploymentBlock, + &inputBoxAddress, + &chainId, + &iConsensusAddress, + ) + if err != nil { + return nil, fmt.Errorf("GetNodeConfig QueryRow failed: %w\n", err) + } + + config := NodePersistentConfig{ + DefaultBlock: defaultBlock, + InputBoxDeploymentBlock: deploymentBlock, + InputBoxAddress: inputBoxAddress, + ChainId: chainId, + IConsensusAddress: iConsensusAddress, + } + + return &config, nil +} + +func (pg *database) GetApplication( + ctx context.Context, + appAddressKey Address, +) (*Application, error) { + var ( + id uint64 + contractAddress Address + templateHash Hash + snapshotUri string + lastProcessedBlock uint64 + epochLength uint64 + status ApplicationStatus + ) + + query := ` + SELECT + id, + contract_address, + template_hash, + snapshot_uri, + last_processed_block, + epoch_length, + status + FROM + application + WHERE + contract_address=@contractAddress` + + args := pgx.NamedArgs{ + "contractAddress": appAddressKey, + } + + err := pg.db.QueryRow(ctx, query, args).Scan( + &id, + &contractAddress, + &templateHash, + &snapshotUri, + &lastProcessedBlock, + &epochLength, + &status, + ) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + slog.Info("GetApplication returned no rows", "service", "repository") + return nil, nil + } + return nil, fmt.Errorf("GetApplication QueryRow failed: %w\n", err) + } + + app := Application{ + Id: id, + ContractAddress: contractAddress, + TemplateHash: templateHash, + SnapshotURI: snapshotUri, + LastProcessedBlock: lastProcessedBlock, + EpochLength: epochLength, + Status: status, + } + + return &app, nil +} + +func (pg *database) GetInput( + ctx context.Context, + indexKey uint64, + appAddressKey Address, +) (*Input, error) { + var ( + id uint64 + index uint64 + status InputCompletionStatus + rawData []byte + blockNumber uint64 + machineHash *Hash + outputsHash *Hash + appAddress Address + ) + + query := ` + SELECT + id, + index, + raw_data, + status, + block_number, + machine_hash, + outputs_hash, + application_address + FROM + input + WHERE + index=@index and 
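+			-- an input is identified by (index, application_address); see the UNIQUE constraint in migration 000001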
application_address=@appAddress` + + args := pgx.NamedArgs{ + "index": indexKey, + "appAddress": appAddressKey, + } + + err := pg.db.QueryRow(ctx, query, args).Scan( + &id, + &index, + &rawData, + &status, + &blockNumber, + &machineHash, + &outputsHash, + &appAddress, + ) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + slog.Info("GetInput returned no rows", "service", "repository") + return nil, nil + } + return nil, fmt.Errorf("GetInput QueryRow failed: %w\n", err) + } + + input := Input{ + Id: id, + Index: index, + CompletionStatus: status, + RawData: rawData, + BlockNumber: blockNumber, + MachineHash: machineHash, + OutputsHash: outputsHash, + AppAddress: appAddress, + } + + return &input, nil +} + +func (pg *database) GetOutput( + ctx context.Context, + indexKey uint64, + appAddressKey Address, +) (*Output, error) { + var ( + id uint64 + index uint64 + rawData []byte + hash *Hash + outputHashesSiblings []Hash + inputId uint64 + ) + + query := ` + SELECT + o.id, + o.index, + o.raw_data, + o.hash, + o.output_hashes_siblings, + o.input_id + FROM + output o + INNER JOIN + input i + ON + o.input_id=i.id + WHERE + o.index=@index and i.application_address=@appAddress` + + args := pgx.NamedArgs{ + "index": indexKey, + "appAddress": appAddressKey, + } + + err := pg.db.QueryRow(ctx, query, args).Scan( + &id, + &index, + &rawData, + &hash, + &outputHashesSiblings, + &inputId, + ) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + slog.Info("GetOutput returned no rows", "service", "repository") + return nil, nil + } + return nil, fmt.Errorf("GetOutput QueryRow failed: %w\n", err) + } + + output := Output{ + Id: id, + Index: index, + RawData: rawData, + Hash: hash, + OutputHashesSiblings: outputHashesSiblings, + InputId: inputId, + } + + return &output, nil +} + +func (pg *database) GetReport( + ctx context.Context, + indexKey uint64, + appAddressKey Address, +) (*Report, error) { + var ( + id uint64 + index uint64 + rawData []byte + inputId uint64 + ) + query := ` + SELECT + r.id, + r.index, + r.raw_data, + r.input_id + FROM + report r + INNER JOIN + input i + ON + r.input_id=i.id + WHERE + r.index=@index and i.application_address=@appAddress` + + args := pgx.NamedArgs{ + "index": indexKey, + "appAddress": appAddressKey, + } + err := pg.db.QueryRow(ctx, query, args).Scan( + &id, + &index, + &rawData, + &inputId, + ) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + slog.Info("GetReport returned no rows", "service", "repository") + return nil, nil + } + return nil, fmt.Errorf("GetReport QueryRow failed: %w\n", err) + } + + report := Report{ + Id: id, + Index: index, + RawData: rawData, + InputId: inputId, + } + + return &report, nil +} + +func (pg *database) GetClaim( + ctx context.Context, + appAddressKey Address, + indexKey uint64, +) (*Claim, error) { + var ( + id uint64 + index uint64 + outputMerkleRootHash Hash + transactionHash *Hash + status ClaimStatus + address Address + ) + + query := ` + SELECT + id, + index, + output_merkle_root_hash, + transaction_hash, + status, + application_address + FROM + claim + WHERE + application_address=@appAddress and index=@index` + + args := pgx.NamedArgs{ + "appAddress": appAddressKey, + "index": indexKey, + } + + err := pg.db.QueryRow(ctx, query, args).Scan( + &id, + &index, + &outputMerkleRootHash, + &transactionHash, + &status, + &address, + ) + if err != nil { + if errors.Is(err, pgx.ErrNoRows) { + slog.Info("GetClaim returned no rows", "service", "repository") + return nil, nil + } + return nil, fmt.Errorf("GetClaim QueryRow 
failed: %w\n", err) + } + + claim := Claim{ + Id: id, + Index: index, + OutputMerkleRootHash: outputMerkleRootHash, + TransactionHash: transactionHash, + Status: status, + AppAddress: address, + } + + return &claim, nil +} From 58903f4a28599b7d2cc1f826d495dbb7520917a4 Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Mon, 8 Jul 2024 21:05:46 -0300 Subject: [PATCH 24/34] feat(data): add base repository tests --- go.mod | 6 +- go.sum | 9 + internal/repository/base_test.go | 407 +++++++++++++++++++++++++++++++ 3 files changed, 420 insertions(+), 2 deletions(-) create mode 100644 internal/repository/base_test.go diff --git a/go.mod b/go.mod index 88ee3bcc9..7150ee474 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/ethereum/go-ethereum v1.14.6 github.com/spf13/cobra v1.8.1 github.com/stretchr/testify v1.9.0 - github.com/testcontainers/testcontainers-go v0.30.0 + github.com/testcontainers/testcontainers-go v0.32.0 github.com/tyler-smith/go-bip32 v1.0.0 github.com/tyler-smith/go-bip39 v1.1.0 ) @@ -49,7 +49,7 @@ require ( github.com/deckarep/golang-set/v2 v2.6.0 // indirect github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect github.com/distribution/reference v0.6.0 // indirect - github.com/docker/docker v25.0.5+incompatible // indirect + github.com/docker/docker v27.0.3+incompatible // indirect github.com/docker/go-connections v0.5.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/ethereum/c-kzg-4844 v1.0.2 // indirect @@ -81,6 +81,7 @@ require ( github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/mmcloughlin/addchain v0.4.0 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/sys/sequential v0.5.0 // indirect github.com/moby/sys/user v0.1.0 // indirect @@ -101,6 +102,7 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/supranational/blst v0.3.12 // indirect + github.com/testcontainers/testcontainers-go/modules/postgres v0.32.0 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/vektah/gqlparser/v2 v2.5.16 // indirect diff --git a/go.sum b/go.sum index acf510c24..e36bc1924 100644 --- a/go.sum +++ b/go.sum @@ -102,6 +102,8 @@ github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5Qvfr github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= +github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= @@ -230,6 +232,8 @@ github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8oh github.com/mmcloughlin/addchain v0.4.0 h1:SobOdjm2xLj1KkXN5/n0xTIWyZA2+s99UCY1iPfkHRY= github.com/mmcloughlin/addchain v0.4.0/go.mod h1:A86O+tHqZLMNO4w6ZZ4FlVQEadcoqkyU72HC5wJ4RlU= github.com/mmcloughlin/profile v0.1.1/go.mod 
h1:IhHD7q1ooxgwTgjxQYkACGA77oFTDdFVejUS1/tS/qU= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc= @@ -308,6 +312,10 @@ github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70 github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E= github.com/testcontainers/testcontainers-go v0.30.0/go.mod h1:K+kHNGiM5zjklKjgTtcrEetF3uhWbMUyqAQoyoh8Pf0= +github.com/testcontainers/testcontainers-go v0.32.0 h1:ug1aK08L3gCHdhknlTTwWjPHPS+/alvLJU/DRxTD/ME= +github.com/testcontainers/testcontainers-go v0.32.0/go.mod h1:CRHrzHLQhlXUsa5gXjTOfqIEJcrK5+xMDmBr/WMI88E= +github.com/testcontainers/testcontainers-go/modules/postgres v0.32.0 h1:ZE4dTdswj3P0j71nL+pL0m2e5HTXJwPoIFr+DDgdPaU= +github.com/testcontainers/testcontainers-go/modules/postgres v0.32.0/go.mod h1:njrNuyuoF2fjhVk6TG/R3Oeu82YwfYkbf5WVTyBXhV4= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.8.0 h1:Mx4Wwe/FjZLeQsK/6kt2EOepwwSl7SmJrK5bV/dXYgY= @@ -421,6 +429,7 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY= gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU= +gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU= launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54= launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM= rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU= diff --git a/internal/repository/base_test.go b/internal/repository/base_test.go new file mode 100644 index 000000000..6097dd328 --- /dev/null +++ b/internal/repository/base_test.go @@ -0,0 +1,407 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package repository + +import ( + "context" + "testing" + "time" + + . 
"github.com/cartesi/rollups-node/internal/node/model" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/suite" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" +) + +const testTimeout = 300 * time.Second + +// This suite sets up a container running a postgres database +type RepositorySuite struct { + suite.Suite + ctx context.Context + cancel context.CancelFunc + postgres *postgres.PostgresContainer + database *database +} + +func (s *RepositorySuite) SetupSuite() { + s.ctx, s.cancel = context.WithTimeout(context.Background(), testTimeout) + + var err error + s.postgres, err = newPostgresContainer(s.ctx) + s.Require().Nil(err) + + endpoint, err := s.postgres.ConnectionString(s.ctx, "sslmode=disable") + s.Require().Nil(err) + RunMigrations(endpoint) + + s.database, err = Connect(s.ctx, endpoint) + s.Require().Nil(err) + + s.SetupDatabase() +} + +func (s *RepositorySuite) TearDownSuite() { + err := s.postgres.Terminate(s.ctx) + s.Nil(err) + s.cancel() +} + +func (s *RepositorySuite) SetupDatabase() { + config := NodePersistentConfig{ + DefaultBlock: DefaultBlockStatusFinalized, + InputBoxDeploymentBlock: 1, + InputBoxAddress: common.HexToAddress("deadbeef"), + ChainId: 1, + IConsensusAddress: common.HexToAddress("deadbeef"), + } + + err := s.database.InsertNodeConfig(s.ctx, &config) + s.Require().Nil(err) + + app := Application{ + Id: 1, + ContractAddress: common.HexToAddress("deadbeef"), + TemplateHash: common.HexToHash("deadbeef"), + SnapshotURI: "this/is/a/test", + LastProcessedBlock: 1, + EpochLength: 10, + Status: ApplicationStatusRunning, + } + + err = s.database.InsertApplication(s.ctx, &app) + s.Require().Nil(err) + + genericHash := common.HexToHash("deadbeef") + + input1 := Input{ + Index: 1, + CompletionStatus: InputStatusAccepted, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 1, + MachineHash: &genericHash, + OutputsHash: &genericHash, + AppAddress: common.HexToAddress("deadbeef"), + } + + err = s.database.InsertInput(s.ctx, &input1) + s.Require().Nil(err) + + input2 := Input{ + Index: 2, + CompletionStatus: InputStatusNone, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 3, + MachineHash: &genericHash, + OutputsHash: &genericHash, + AppAddress: common.HexToAddress("deadbeef"), + } + + err = s.database.InsertInput(s.ctx, &input2) + s.Require().Nil(err) + + var siblings []Hash + siblings = append(siblings, genericHash) + + output0 := Output{ + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: nil, + } + + err = s.database.InsertOutput(s.ctx, &output0) + s.Require().Nil(err) + + output1 := Output{ + Index: 2, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: nil, + } + + err = s.database.InsertOutput(s.ctx, &output1) + s.Require().Nil(err) + + output3 := Output{ + Index: 3, + InputId: 2, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: siblings, + } + + err = s.database.InsertOutput(s.ctx, &output3) + s.Require().Nil(err) + + report := Report{ + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + } + + err = s.database.InsertReport(s.ctx, &report) + s.Require().Nil(err) + + claim := Claim{ + Status: ClaimStatusPending, + Index: 1, + OutputMerkleRootHash: genericHash, + TransactionHash: &genericHash, + AppAddress: common.HexToAddress("deadbeef"), + } + + err = s.database.InsertClaim(s.ctx, &claim) + s.Require().Nil(err) +} + +func 
(s *RepositorySuite) TestApplicationExists() { + app := Application{ + Id: 1, + ContractAddress: common.HexToAddress("deadbeef"), + TemplateHash: common.HexToHash("deadbeef"), + SnapshotURI: "this/is/a/test", + LastProcessedBlock: 1, + EpochLength: 10, + Status: ApplicationStatusRunning, + } + + response, err := s.database.GetApplication(s.ctx, common.HexToAddress("deadbeef")) + s.Require().Equal(&app, response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestApplicationDoesntExist() { + response, err := s.database.GetApplication(s.ctx, common.HexToAddress("deadbeefaaa")) + s.Require().Nil(response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestApplicationFailsDuplicateRow() { + app := Application{ + Id: 1, + ContractAddress: common.HexToAddress("deadbeef"), + TemplateHash: common.HexToHash("deadbeef"), + SnapshotURI: "this/is/a/test", + LastProcessedBlock: 0, + EpochLength: 10, + Status: ApplicationStatusRunning, + } + + err := s.database.InsertApplication(s.ctx, &app) + s.Require().ErrorContains(err, "duplicate key value") +} + +func (s *RepositorySuite) TestInputExists() { + genericHash := common.HexToHash("deadbeef") + + input := Input{ + Id: 1, + Index: 1, + CompletionStatus: InputStatusAccepted, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 1, + MachineHash: &genericHash, + OutputsHash: &genericHash, + AppAddress: common.HexToAddress("deadbeef"), + } + + response, err := s.database.GetInput(s.ctx, 1, common.HexToAddress("deadbeef")) + s.Require().Equal(&input, response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestInputDoesntExist() { + response, err := s.database.GetInput(s.ctx, 10, common.HexToAddress("deadbeef")) + s.Require().Nil(response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestInputFailsDuplicateRow() { + input := Input{ + Index: 1, + CompletionStatus: InputStatusNone, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 1, + AppAddress: common.HexToAddress("deadbeef"), + } + + err := s.database.InsertInput(s.ctx, &input) + s.Require().ErrorContains(err, "duplicate key value") +} + +func (s *RepositorySuite) TestInputFailsApplicationDoesntExist() { + input := Input{ + Index: 3, + CompletionStatus: InputStatusNone, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 3, + AppAddress: common.HexToAddress("deadbeefaaa"), + } + + err := s.database.InsertInput(s.ctx, &input) + s.Require().ErrorContains(err, "violates foreign key constraint") +} + +func (s *RepositorySuite) TestOutputExists() { + var siblings []Hash + siblings = append(siblings, common.HexToHash("deadbeef")) + + output := Output{ + Id: 1, + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: siblings, + } + + response, err := s.database.GetOutput(s.ctx, 1, common.HexToAddress("deadbeef")) + s.Require().Equal(&output, response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestOutputDoesntExist() { + response, err := s.database.GetOutput(s.ctx, 10, common.HexToAddress("deadbeef")) + s.Require().Nil(response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestOutputFailsDuplicateRow() { + output := Output{ + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: nil, + } + + err := s.database.InsertOutput(s.ctx, &output) + s.Require().ErrorContains(err, "duplicate key value") +} + +func (s *RepositorySuite) TestOutputFailsInputDoesntExist() { + output := Output{ + Index: 10, + InputId: 10, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: nil, + 
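+		// input id 10 is never created by SetupDatabase, so this insert must violate the foreign key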
} + + err := s.database.InsertOutput(s.ctx, &output) + s.Require().ErrorContains(err, "violates foreign key constraint") +} + +func (s *RepositorySuite) TestReportExists() { + report := Report{ + Id: 1, + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + } + + response, err := s.database.GetReport(s.ctx, 1, common.HexToAddress("deadbeef")) + s.Require().Equal(&report, response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestReportDoesntExist() { + response, err := s.database.GetReport(s.ctx, 10, common.HexToAddress("deadbeef")) + s.Require().Nil(response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestReportFailsDuplicateRow() { + report := Report{ + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + } + + err := s.database.InsertReport(s.ctx, &report) + s.Require().ErrorContains(err, "duplicate key value") +} + +func (s *RepositorySuite) TestReportFailsInputDoesntExist() { + report := Report{ + Index: 2, + InputId: 10, + RawData: common.Hex2Bytes("deadbeef"), + } + + err := s.database.InsertReport(s.ctx, &report) + s.Require().ErrorContains(err, "violates foreign key constraint") +} + +func (s *RepositorySuite) TestClaimExists() { + genericHash := common.HexToHash("deadbeef") + + claim := Claim{ + Id: 1, + Status: ClaimStatusPending, + Index: 1, + TransactionHash: &genericHash, + OutputMerkleRootHash: common.HexToHash("deadbeef"), + AppAddress: common.HexToAddress("deadbeef"), + } + + response, err := s.database.GetClaim(s.ctx, common.HexToAddress("deadbeef"), 1) + s.Require().Equal(claim, *response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestClaimDoesntExist() { + response, err := s.database.GetClaim(s.ctx, common.HexToAddress("deadbeef"), 0) + s.Require().Nil(response) + s.Require().Nil(err) +} + +func (s *RepositorySuite) TestClaimFailsDuplicateRow() { + claim := Claim{ + Status: ClaimStatusPending, + Index: 1, + OutputMerkleRootHash: common.HexToHash("deadbeef"), + AppAddress: common.HexToAddress("deadbeef"), + } + + err := s.database.InsertClaim(s.ctx, &claim) + s.Require().ErrorContains(err, "duplicate key value") +} + +func (s *RepositorySuite) TestClaimFailsApplicationDoesntExist() { + claim := Claim{ + Status: ClaimStatusPending, + Index: 2, + OutputMerkleRootHash: common.HexToHash("deadbeef"), + AppAddress: common.HexToAddress("deadbeefaaa"), + } + + err := s.database.InsertClaim(s.ctx, &claim) + s.Require().ErrorContains(err, "violates foreign key constraint") +} + +func TestRepositorySuite(t *testing.T) { + suite.Run(t, new(RepositorySuite)) +} + +// We use the postgres alpine docker image to test the repository. +func newPostgresContainer(ctx context.Context) (*postgres.PostgresContainer, error) { + dbName := "postgres" + dbUser := "postgres" + dbPassword := "password" + + // Start the postgres container + container, err := postgres.Run( + ctx, + "postgres:16-alpine", + postgres.WithDatabase(dbName), + postgres.WithUsername(dbUser), + postgres.WithPassword(dbPassword), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). 
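+				// the image logs this message twice (once during initdb, once on the final start), hence occurrence 2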
+				WithStartupTimeout(5*time.Second)),
+	)
+
+	return container, err
+}
From 6406487b0c6a3997e6cd762883cd269211eedefc Mon Sep 17 00:00:00 2001
From: Gustavo Madeira Krieger
Date: Mon, 8 Jul 2024 21:06:28 -0300
Subject: [PATCH 25/34] feat(data): add validator repository functions

---
 internal/repository/validator.go | 190 +++++++++++++++++++++++++++++++
 1 file changed, 190 insertions(+)
 create mode 100644 internal/repository/validator.go

diff --git a/internal/repository/validator.go b/internal/repository/validator.go
new file mode 100644
index 000000000..32a63aff3
--- /dev/null
+++ b/internal/repository/validator.go
@@ -0,0 +1,190 @@
+// (c) Cartesi and individual authors (see AUTHORS)
+// SPDX-License-Identifier: Apache-2.0 (see LICENSE)
+
+package repository
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	. "github.com/cartesi/rollups-node/internal/node/model"
+	"github.com/jackc/pgx/v5"
+)
+
+const DefaultServiceTimeout = 5 * time.Minute
+
+func (pg *database) GetLastProcessedBlock(
+	ctx context.Context,
+	appAddress Address,
+) (uint64, error) {
+	var block uint64
+
+	query := `
+		SELECT
+			last_processed_block
+		FROM
+			application
+		WHERE
+			contract_address=@address`
+
+	args := pgx.NamedArgs{
+		"address": appAddress,
+	}
+
+	err := pg.db.QueryRow(ctx, query, args).Scan(&block)
+	if err != nil {
+		return 0, fmt.Errorf("QueryRow failed: %w", err)
+	}
+
+	return block, nil
+}
+
+func (pg *database) GetAllOutputsFromProcessedInputs(
+	ctx context.Context,
+	startBlock uint64,
+	endBlock uint64,
+	timeout *time.Duration,
+) ([]Output, error) {
+	ctxTimeout, cancel := context.WithTimeout(ctx, *timeout)
+	defer cancel()
+	for {
+		select {
+		case <-ctxTimeout.Done():
+			return nil, fmt.Errorf("GetAllOutputsFromProcessedInputs timeout")
+		default:
+			outputs, err := pg.getAllOutputsFromProcessedInputs(ctxTimeout, startBlock, endBlock)
+			if outputs != nil {
+				return outputs, nil
+			}
+			if err != nil {
+				return nil, err
+			}
+			time.Sleep(time.Second) // some inputs are still unprocessed; wait before polling again
+		}
+	}
+}
+
+func (pg *database) getAllOutputsFromProcessedInputs(
+	ctx context.Context,
+	startBlock uint64,
+	endBlock uint64,
+) ([]Output, error) {
+	query := `
+		SELECT
+			o.id,
+			o.index,
+			o.raw_data,
+			o.input_id,
+			i.status
+		FROM
+			output o
+		INNER JOIN
+			input i
+		ON
+			o.input_id=i.id
+		WHERE
+			i.block_number BETWEEN @startBlock and @endBlock
+		ORDER BY
+			o.index asc`
+
+	args := pgx.NamedArgs{
+		"startBlock": startBlock,
+		"endBlock":   endBlock,
+	}
+
+	rows, err := pg.db.Query(ctx, query, args)
+	if err != nil {
+		return nil, fmt.Errorf("Query failed: %w", err)
+	}
+
+	var id, input_id, index uint64
+	var rawData []byte
+	var status string
+	var results []Output
+
+	rowCount := 0
+
+	_, err = pgx.ForEachRow(rows, []any{&id, &index, &rawData, &input_id, &status},
+		func() error {
+			rowCount++
+			if status != string(InputStatusNone) {
+				output := Output{
+					Id:      id,
+					Index:   index,
+					RawData: rawData,
+					InputId: input_id,
+				}
+				results = append(results, output)
+			}
+			return nil
+		})
+	if err != nil {
+		return nil, fmt.Errorf("ForEachRow failed: %w", err)
+	}
+
+	if len(results) == rowCount {
+		return results, nil
+	}
+
+	return nil, nil
+}
+
+func (pg *database) FinishEpoch(
+	ctx context.Context,
+	claim *Claim,
+	outputs []Output,
+) error {
+	query1 := `
+		INSERT INTO claim
+			(index,
+			output_merkle_root_hash,
+			status,
+			application_address)
+		VALUES
+			(@index,
+			@outputMerkleRootHash,
+			@status,
+			@appAddress)`
+
+	query2 := `
+		UPDATE output
+		SET
+			output_hashes_siblings=@outputHashesSiblings
+		WHERE
+			index=@index`
+
+	args := pgx.NamedArgs{
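+		// @status is always bound to PENDING below; the claim.Status field from the caller is not used here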
"index": claim.Index, + "status": ClaimStatusPending, + "outputMerkleRootHash": claim.OutputMerkleRootHash, + "appAddress": claim.AppAddress, + } + + tx, err := pg.db.Begin(ctx) + if err != nil { + return fmt.Errorf("unable to finish epoch: %w\n", err) + } + _, err = tx.Exec(ctx, query1, args) + if err != nil { + return fmt.Errorf("unable to finish epoch: %w\n", err) + } + + for _, output := range outputs { + outputArgs := pgx.NamedArgs{ + "outputHashesSiblings": output.OutputHashesSiblings, + "index": output.Index, + } + _, err = tx.Exec(ctx, query2, outputArgs) + if err != nil { + return fmt.Errorf("unable to finish epoch: %w\n", err) + } + } + + err = tx.Commit(ctx) + if err != nil { + return fmt.Errorf("unable to finish epoch: %w\n", err) + } + + return nil +} From 0d22e9c4b13fcf432447161da979a0011ecdd86a Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Mon, 8 Jul 2024 21:07:15 -0300 Subject: [PATCH 26/34] feat(data): add validator repository tests --- internal/repository/validator_test.go | 113 ++++++++++++++++++++++++++ 1 file changed, 113 insertions(+) create mode 100644 internal/repository/validator_test.go diff --git a/internal/repository/validator_test.go b/internal/repository/validator_test.go new file mode 100644 index 000000000..fc0061f09 --- /dev/null +++ b/internal/repository/validator_test.go @@ -0,0 +1,113 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package repository + +import ( + "time" + + . "github.com/cartesi/rollups-node/internal/node/model" + "github.com/ethereum/go-ethereum/common" +) + +func (s *RepositorySuite) TestGetMostRecentBlock() { + var block uint64 = 1 + + response, err := s.database.GetLastProcessedBlock(s.ctx, common.HexToAddress("deadbeef")) + s.Require().Nil(err) + + s.Require().Equal(block, response) +} + +func (s *RepositorySuite) TestGetAllOutputsFromProcessedInputs() { + output0 := Output{ + Id: 1, + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: nil, + } + + output1 := Output{ + Id: 2, + Index: 2, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: nil, + } + + timeout := 5 * time.Second + + response, err := s.database.GetAllOutputsFromProcessedInputs(s.ctx, 1, 2, &timeout) + s.Require().Nil(err) + s.Require().Equal(output0, response[0]) + s.Require().Equal(output1, response[1]) +} + +func (s *RepositorySuite) TestGetAllOutputsFromProcessedInputsTimeout() { + timeout := 5 * time.Second + + _, err := s.database.GetAllOutputsFromProcessedInputs(s.ctx, 2, 3, &timeout) + s.Require().ErrorContains(err, "timeout") +} + +func (s *RepositorySuite) TestFinishEpochTransaction() { + var siblings []Hash + siblings = append(siblings, common.HexToHash("deadbeef")) + + output := Output{ + Id: 1, + Index: 1, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: siblings, + } + + claim := Claim{ + Id: 4, + Index: 2, + Status: ClaimStatusPending, + OutputMerkleRootHash: common.HexToHash("deadbeef"), + AppAddress: common.HexToAddress("deadbeef"), + } + + var outputs []Output + outputs = append(outputs, output) + + err := s.database.FinishEpoch(s.ctx, &claim, outputs) + s.Require().Nil(err) + + response0, err := s.database.GetClaim(s.ctx, common.HexToAddress("deadbeef"), 2) + s.Require().Nil(err) + s.Require().Equal(claim, *response0) + + response1, err := s.database.GetOutput(s.ctx, 1, common.HexToAddress("deadbeef")) + s.Require().Nil(err) + s.Require().Equal(output, *response1) +} + +func (s 
*RepositorySuite) TestFinishEpochTransactionRollback() { + var siblings []Hash + siblings = append(siblings, common.HexToHash("deadbeef")) + + output := Output{ + Id: 2, + Index: 2, + InputId: 1, + RawData: common.Hex2Bytes("deadbeef"), + OutputHashesSiblings: siblings, + } + + claim := Claim{ + Index: 2, + Status: ClaimStatusPending, + OutputMerkleRootHash: common.HexToHash("deadbeef"), + AppAddress: common.HexToAddress("deadbeef"), + } + + var outputs []Output + outputs = append(outputs, output) + + err := s.database.FinishEpoch(s.ctx, &claim, outputs) + s.Require().ErrorContains(err, "unable to finish epoch") +} From ae903816410d4e45682bf10837c615d7ab708773 Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Mon, 8 Jul 2024 21:07:57 -0300 Subject: [PATCH 27/34] feat(data): add input reader repository functions --- internal/repository/inputreader.go | 136 +++++++++++++++++++++++++++++ 1 file changed, 136 insertions(+) create mode 100644 internal/repository/inputreader.go diff --git a/internal/repository/inputreader.go b/internal/repository/inputreader.go new file mode 100644 index 000000000..6bddef05b --- /dev/null +++ b/internal/repository/inputreader.go @@ -0,0 +1,136 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package repository + +import ( + "context" + "errors" + "fmt" + + . "github.com/cartesi/rollups-node/internal/node/model" + "github.com/jackc/pgx/v5" +) + +func (pg *database) InsertInputsAndUpdateLastProcessedBlock( + ctx context.Context, + inputs []Input, + blockNumber uint64, + contractAddress Address, +) error { + var errInsertInputs = errors.New("unable to insert inputs") + + query := ` + INSERT INTO input + (index, + status, + raw_data, + block_number, + application_address) + VALUES + (@index, + @status, + @rawData, + @blockNumber, + @appAddress)` + + query2 := ` + UPDATE application + SET last_processed_block = @blockNumber + WHERE + contract_address=@contractAddress` + + args := pgx.NamedArgs{ + "blockNumber": blockNumber, + "contractAddress": contractAddress, + } + + tx, err := pg.db.Begin(ctx) + if err != nil { + return fmt.Errorf("%w: %w", errInsertInputs, err) + } + + for _, input := range inputs { + inputArgs := pgx.NamedArgs{ + "index": input.Index, + "status": input.CompletionStatus, + "rawData": input.RawData, + "blockNumber": input.BlockNumber, + "appAddress": input.AppAddress, + } + _, err = tx.Exec(ctx, query, inputArgs) + if err != nil { + return fmt.Errorf("%w: %w", errInsertInputs, err) + } + } + + _, err = tx.Exec(ctx, query2, args) + if err != nil { + return fmt.Errorf("%w: %w", errInsertInputs, err) + } + + err = tx.Commit(ctx) + if err != nil { + return fmt.Errorf("%w: %w", errInsertInputs, err) + } + + return nil +} + +func (pg *database) GetAllRunningApplications( + ctx context.Context, +) ([]Application, error) { + var ( + id uint64 + contractAddress Address + templateHash Hash + snapshotUri string + lastProcessedBlock uint64 + epochLength uint64 + status ApplicationStatus + results []Application + ) + + query := ` + SELECT + id, + contract_address, + template_hash, + snapshot_uri, + last_processed_block, + epoch_length, + status + FROM + application + WHERE + status='RUNNING' + ORDER BY + id asc` + + rows, err := pg.db.Query(ctx, query) + if err != nil { + return nil, fmt.Errorf("Query failed: %v\n", err) + } + + _, err = pgx.ForEachRow(rows, + []any{&id, &contractAddress, &templateHash, &snapshotUri, + &lastProcessedBlock, &epochLength, &status}, + func() error { + app 
:= Application{ + Id: id, + ContractAddress: contractAddress, + TemplateHash: templateHash, + SnapshotURI: snapshotUri, + LastProcessedBlock: lastProcessedBlock, + EpochLength: epochLength, + Status: status, + } + results = append(results, app) + return nil + }) + if err != nil { + return nil, fmt.Errorf("ForEachRow failed: %w\n", err) + } + + return results, nil +} From 66ed1db7940347606ffc2981b1ad0c8897ad7beb Mon Sep 17 00:00:00 2001 From: Gustavo Madeira Krieger Date: Mon, 8 Jul 2024 21:09:20 -0300 Subject: [PATCH 28/34] feat(data): add input reader repository tests --- internal/repository/inputreader_test.go | 137 ++++++++++++++++++++++++ 1 file changed, 137 insertions(+) create mode 100644 internal/repository/inputreader_test.go diff --git a/internal/repository/inputreader_test.go b/internal/repository/inputreader_test.go new file mode 100644 index 000000000..be106c9cc --- /dev/null +++ b/internal/repository/inputreader_test.go @@ -0,0 +1,137 @@ +// (c) Cartesi and individual authors (see AUTHORS) +// SPDX-License-Identifier: Apache-2.0 (see LICENSE) + +package repository + +import ( + . "github.com/cartesi/rollups-node/internal/node/model" + "github.com/ethereum/go-ethereum/common" +) + +func (s *RepositorySuite) TestInsertInputsAndUpdateLastProcessedBlock() { + input0 := Input{ + Id: 5, + Index: 5, + CompletionStatus: InputStatusNone, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 5, + AppAddress: common.HexToAddress("deadbeef"), + } + + input1 := Input{ + Id: 6, + Index: 6, + CompletionStatus: InputStatusNone, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 6, + AppAddress: common.HexToAddress("deadbeef"), + } + + var inputs []Input + inputs = append(inputs, input0) + inputs = append(inputs, input1) + + err := s.database.InsertInputsAndUpdateLastProcessedBlock( + s.ctx, + inputs, + 6, + common.HexToAddress("deadbeef"), + ) + s.Require().Nil(err) + + response, err := s.database.GetInput(s.ctx, 5, common.HexToAddress("deadbeef")) + s.Require().Nil(err) + s.Require().Equal(&input0, response) + + var mostRecentCheck uint64 = 6 + response2, err := s.database.GetLastProcessedBlock(s.ctx, common.HexToAddress("deadbeef")) + s.Require().Nil(err) + s.Require().Equal(mostRecentCheck, response2) +} + +func (s *RepositorySuite) TestInsertInputsAndUpdateMostRecentFinalizedBlockEmptyInputs() { + var inputs []Input + + err := s.database.InsertInputsAndUpdateLastProcessedBlock( + s.ctx, + inputs, + 7, + common.HexToAddress("deadbeef"), + ) + s.Require().Nil(err) + + var block uint64 = 7 + response, err := s.database.GetLastProcessedBlock(s.ctx, common.HexToAddress("deadbeef")) + s.Require().Nil(err) + s.Require().Equal(block, response) +} + +func (s *RepositorySuite) TestInsertInputsAndUpdateLastProcessedBlockInputAlreadyExists() { + input := Input{ + Id: 5, + Index: 5, + CompletionStatus: InputStatusNone, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 5, + AppAddress: common.HexToAddress("deadbeef"), + } + + var inputs []Input + inputs = append(inputs, input) + + err := s.database.InsertInputsAndUpdateLastProcessedBlock( + s.ctx, + inputs, + 8, + common.HexToAddress("deadbeef"), + ) + s.Require().ErrorContains(err, "duplicate key value violates unique constraint") +} + +func (s *RepositorySuite) TestInsertInputsAndUpdateLastProcessedBlockDuplicateInput() { + input0 := Input{ + Id: 7, + Index: 7, + CompletionStatus: InputStatusNone, + RawData: common.Hex2Bytes("deadbeef"), + BlockNumber: 7, + AppAddress: common.HexToAddress("deadbeef"), + } + + input1 := Input{ + Id: 7, + 
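+		// deliberately identical to input0 above; inserting the pair must fail with a unique-constraint error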
Index:            7,
+		CompletionStatus: InputStatusNone,
+		RawData:          common.Hex2Bytes("deadbeef"),
+		BlockNumber:      7,
+		AppAddress:       common.HexToAddress("deadbeef"),
+	}
+
+	var inputs []Input
+	inputs = append(inputs, input0)
+	inputs = append(inputs, input1)
+
+	err := s.database.InsertInputsAndUpdateLastProcessedBlock(
+		s.ctx,
+		inputs,
+		8,
+		common.HexToAddress("deadbeef"),
+	)
+	s.Require().ErrorContains(err, "duplicate key value violates unique constraint")
+}
+
+func (s *RepositorySuite) TestGetAllRunningApplications() {
+	app := Application{
+		Id:                 1,
+		ContractAddress:    common.HexToAddress("deadbeef"),
+		TemplateHash:       common.HexToHash("deadbeef"),
+		SnapshotURI:        "this/is/a/test",
+		LastProcessedBlock: 1,
+		EpochLength:        10,
+		Status:             ApplicationStatusRunning,
+	}
+
+	response, err := s.database.GetAllRunningApplications(s.ctx)
+	s.Require().Nil(err)
+	s.Require().Equal(app, response[0])
+}
From aa762a3eb3c940edc4214de7691017ec7c4b0e69 Mon Sep 17 00:00:00 2001
From: Gustavo Madeira Krieger
Date: Mon, 8 Jul 2024 22:24:22 -0300
Subject: [PATCH 29/34] feat(node): run migrations on node startup

---
 cmd/cartesi-rollups-node/main.go | 26 ++++++++++++++++++++++++++
 1 file changed, 26 insertions(+)

diff --git a/cmd/cartesi-rollups-node/main.go b/cmd/cartesi-rollups-node/main.go
index 63a953cbe..3ed61bd42 100644
--- a/cmd/cartesi-rollups-node/main.go
+++ b/cmd/cartesi-rollups-node/main.go
@@ -5,6 +5,7 @@ package main
 import (
 	"context"
+	"fmt"
 	"log/slog"
 	"os"
 	"os/signal"
@@ -13,6 +14,9 @@ import (
 	"github.com/cartesi/rollups-node/internal/node"
 	"github.com/cartesi/rollups-node/internal/node/config"
+	. "github.com/cartesi/rollups-node/internal/node/model"
+	"github.com/cartesi/rollups-node/internal/repository"
+	"github.com/ethereum/go-ethereum/common"
 	"github.com/lmittmann/tint"
 	"github.com/mattn/go-isatty"
 )
@@ -43,6 +47,28 @@
 	slog.SetDefault(logger)
 	slog.Info("Starting the Cartesi Rollups Node", "version", buildVersion, "config", config)
 
+	// set up the database
+	nodePersistentConfig := NodePersistentConfig{
+		DefaultBlock:            DefaultBlockStatusFinalized,
+		InputBoxDeploymentBlock: uint64(config.ContractsInputBoxDeploymentBlockNumber),
+		InputBoxAddress:         common.HexToAddress(config.ContractsInputBoxAddress),
+		ChainId:                 config.BlockchainID,
+		IConsensusAddress:       common.HexToAddress(config.ContractsIConsensusAddress),
+	}
+
+	repository.RunMigrations(fmt.Sprintf("%v?sslmode=disable", config.PostgresEndpoint.Value))
+	database, err := repository.Connect(ctx, config.PostgresEndpoint.Value)
+	if err != nil {
+		slog.Error("Node couldn't connect to the database", "error", err)
+		os.Exit(1)
+	}
+	err = database.InsertNodeConfig(ctx, &nodePersistentConfig)
+	if err != nil {
+		slog.Error("Node couldn't insert database config", "error", err)
+		os.Exit(1)
+	}
+	database.Close()
+
 	// create the node supervisor
 	supervisor, err := node.Setup(ctx, config, "")
 	if err != nil {
From 1b24a6e7e7264d87d6a470c44117e047c1cb29d6 Mon Sep 17 00:00:00 2001
From: Gustavo Madeira Krieger
Date: Mon, 8 Jul 2024 22:35:55 -0300
Subject: [PATCH 30/34] feat(data): add postgraphile views

---
 go.mod                                       |  6 +-
 go.sum                                       | 11 ++-
 internal/node/services.go                    |  3 +-
 .../000002_create_postgraphile_view.down.sql |  4 +
 .../000002_create_postgraphile_view.up.sql   | 77 +++++++++++++++
 5 files changed, 91 insertions(+), 10 deletions(-)
 create mode 100644 internal/repository/migrations/000002_create_postgraphile_view.down.sql
 create mode 100644 internal/repository/migrations/000002_create_postgraphile_view.up.sql

diff --git a/go.mod b/go.mod
index 7150ee474..60f9ed4d9
100644 --- a/go.mod +++ b/go.mod @@ -16,9 +16,12 @@ require github.com/BurntSushi/toml v1.4.0 require ( github.com/Khan/genqlient v0.7.0 github.com/deepmap/oapi-codegen/v2 v2.1.0 + github.com/golang-migrate/migrate/v4 v4.17.1 + github.com/jackc/pgx/v5 v5.6.0 github.com/lmittmann/tint v1.0.4 github.com/mattn/go-isatty v0.0.20 github.com/oapi-codegen/runtime v1.1.1 + github.com/testcontainers/testcontainers-go/modules/postgres v0.32.0 golang.org/x/sync v0.7.0 golang.org/x/text v0.16.0 ) @@ -62,7 +65,6 @@ require ( github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang-migrate/migrate/v4 v4.17.1 // indirect github.com/google/uuid v1.6.0 // indirect github.com/gorilla/websocket v1.5.3 // indirect github.com/hashicorp/errwrap v1.1.0 // indirect @@ -72,7 +74,6 @@ require ( github.com/invopop/yaml v0.2.0 // indirect github.com/jackc/pgpassfile v1.0.0 // indirect github.com/jackc/pgservicefile v0.0.0-20221227161230-091c0ba34f0a // indirect - github.com/jackc/pgx/v5 v5.6.0 // indirect github.com/jackc/puddle/v2 v2.2.1 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/klauspost/compress v1.17.9 // indirect @@ -102,7 +103,6 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/stretchr/objx v0.5.2 // indirect github.com/supranational/blst v0.3.12 // indirect - github.com/testcontainers/testcontainers-go/modules/postgres v0.32.0 // indirect github.com/tklauser/go-sysconf v0.3.14 // indirect github.com/tklauser/numcpus v0.8.0 // indirect github.com/vektah/gqlparser/v2 v2.5.16 // indirect diff --git a/go.sum b/go.sum index e36bc1924..29f0d46bc 100644 --- a/go.sum +++ b/go.sum @@ -98,10 +98,10 @@ github.com/deepmap/oapi-codegen/v2 v2.1.0 h1:I/NMVhJCtuvL9x+S2QzZKpSjGi33oDZwPRd github.com/deepmap/oapi-codegen/v2 v2.1.0/go.mod h1:R1wL226vc5VmCNJUvMyYr3hJMm5reyv25j952zAVXZ8= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48 h1:fRzb/w+pyskVMQ+UbP35JkH8yB7MYb4q/qhBarqZE6g= github.com/dgryski/trifles v0.0.0-20200323201526-dd97f9abfb48/go.mod h1:if7Fbed8SFyPtHLHbg49SI7NAdJiC5WIA09pe59rfAA= +github.com/dhui/dktest v0.4.1 h1:/w+IWuDXVymg3IrRJCHHOkMK10m9aNVMOyD0X12YVTg= +github.com/dhui/dktest v0.4.1/go.mod h1:DdOqcUpL7vgyP4GlF3X3w7HbSlz8cEQzwewPveYEQbA= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= -github.com/docker/docker v25.0.5+incompatible h1:UmQydMduGkrD5nQde1mecF/YnSbTOaPeFIeP5C4W+DE= -github.com/docker/docker v25.0.5+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v27.0.3+incompatible h1:aBGI9TeQ4MPlhquTQKq9XbK79rKFVwXNUAYz9aXyEBE= github.com/docker/docker v27.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c= @@ -310,8 +310,6 @@ github.com/supranational/blst v0.3.12 h1:Vfas2U2CFHhniv2QkUm2OVa1+pGTdqtpqm9NnhU github.com/supranational/blst v0.3.12/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY= github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc= -github.com/testcontainers/testcontainers-go v0.30.0 h1:jmn/XS22q4YRrcMwWg0pAwlClzs/abopbsBzrepyc4E= 
-github.com/testcontainers/testcontainers-go v0.30.0/go.mod h1:K+kHNGiM5zjklKjgTtcrEetF3uhWbMUyqAQoyoh8Pf0=
 github.com/testcontainers/testcontainers-go v0.32.0 h1:ug1aK08L3gCHdhknlTTwWjPHPS+/alvLJU/DRxTD/ME=
 github.com/testcontainers/testcontainers-go v0.32.0/go.mod h1:CRHrzHLQhlXUsa5gXjTOfqIEJcrK5+xMDmBr/WMI88E=
 github.com/testcontainers/testcontainers-go/modules/postgres v0.32.0 h1:ZE4dTdswj3P0j71nL+pL0m2e5HTXJwPoIFr+DDgdPaU=
@@ -391,6 +389,8 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI=
 golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA=
+golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4=
@@ -427,9 +427,8 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C
 gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
 gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
 gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
-gotest.tools/v3 v3.5.0 h1:Ljk6PdHdOhAb5aDMWXjDLMMhph+BpztA4v1QdqEW2eY=
-gotest.tools/v3 v3.5.0/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
 gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
 launchpad.net/gocheck v0.0.0-20140225173054-000000000087 h1:Izowp2XBH6Ya6rv+hqbceQyw/gSGoXfH/UPoTGduL54=
 launchpad.net/gocheck v0.0.0-20140225173054-000000000087/go.mod h1:hj7XX3B/0A+80Vse0e+BUHsHMTEhd0O4cpUHr/e/BUM=
 rsc.io/tmplfunc v0.0.3 h1:53XFQh69AfOa8Tw0Jm7t+GV7KZhOi6jzsCzTtKbMvzU=
diff --git a/internal/node/services.go b/internal/node/services.go
index cd5cd46af..704d72e6e 100644
--- a/internal/node/services.go
+++ b/internal/node/services.go
@@ -153,11 +153,12 @@ func newPostgraphileService(c config.NodeConfig, workDir string) services.Comman
 	s.Args = append(s.Args, "--no-setof-functions-contain-nulls")
 	s.Args = append(s.Args, "--no-ignore-rbac")
 	s.Args = append(s.Args, "--enable-query-batching")
+	s.Args = append(s.Args, "--enhance-graphiql")
 	s.Args = append(s.Args, "--extended-errors", "errcode")
 	s.Args = append(s.Args, "--append-plugins", "@graphile-contrib/pg-simplify-inflector")
 	s.Args = append(s.Args, "--legacy-relations", "omit")
 	s.Args = append(s.Args, "--connection", fmt.Sprintf("%v", c.PostgresEndpoint.Value))
-	s.Args = append(s.Args, "--schema", "public")
+	s.Args = append(s.Args, "--schema", "graphql")
 	s.Args = append(s.Args, "--host", "0.0.0.0")
 	s.Args = append(s.Args, "--port", fmt.Sprint(getPort(c, portOffsetPostgraphile)))
 	s.Env = append(s.Env, os.Environ()...)
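The services.go change above points Postgraphile at the dedicated `graphql` schema created by the migration below, so only the curated views are exposed over GraphQL. For reference, a minimal sketch in Go of querying the resulting endpoint; the port and the generated field names are assumptions (Postgraphile serves `/graphql` by default, and the `pg-simplify-inflector` plugin typically exposes the `inputs` view as a root `inputs` connection).

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical endpoint; the actual port comes from the node configuration.
	const endpoint = "http://localhost:10004/graphql"

	// Field names assume Postgraphile's default inflection with pg-simplify-inflector.
	query := `{ inputs { nodes { index status blockNumber } } }`
	body, _ := json.Marshal(map[string]string{"query": query})

	resp, err := http.Post(endpoint, "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode the generic GraphQL response envelope and print it.
	var result map[string]any
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		panic(err)
	}
	fmt.Println(result)
}
```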
diff --git a/internal/repository/migrations/000002_create_postgraphile_view.down.sql b/internal/repository/migrations/000002_create_postgraphile_view.down.sql
new file mode 100644
index 000000000..0ba224155
--- /dev/null
+++ b/internal/repository/migrations/000002_create_postgraphile_view.down.sql
@@ -0,0 +1,4 @@
+-- (c) Cartesi and individual authors (see AUTHORS)
+-- SPDX-License-Identifier: Apache-2.0 (see LICENSE)
+
+DROP SCHEMA graphql CASCADE;
\ No newline at end of file
diff --git a/internal/repository/migrations/000002_create_postgraphile_view.up.sql b/internal/repository/migrations/000002_create_postgraphile_view.up.sql
new file mode 100644
index 000000000..e1062596b
--- /dev/null
+++ b/internal/repository/migrations/000002_create_postgraphile_view.up.sql
@@ -0,0 +1,77 @@
+
+-- (c) Cartesi and individual authors (see AUTHORS)
+-- SPDX-License-Identifier: Apache-2.0 (see LICENSE)
+
+CREATE SCHEMA IF NOT EXISTS graphql;
+
+CREATE OR REPLACE VIEW graphql."applications" AS
+    SELECT
+        "contract_address",
+        "template_hash",
+        "snapshot_uri",
+        "last_processed_block",
+        "epoch_length",
+        "status"
+    FROM
+        "application";
+
+CREATE OR REPLACE VIEW graphql."inputs" AS
+    SELECT
+        "index",
+        "status",
+        "block_number",
+        "raw_data",
+        "machine_hash",
+        "outputs_hash",
+        "application_address"
+    FROM
+        "input";
+
+CREATE OR REPLACE VIEW graphql."outputs" AS
+    SELECT
+        o."index",
+        o."raw_data",
+        o."output_hashes_siblings",
+        i."index" as "input_index"
+    FROM
+        "output" o
+    INNER JOIN
+        "input" i on o."input_id"=i."id";
+
+CREATE OR REPLACE VIEW graphql."reports" AS
+    SELECT
+        r."index",
+        r."raw_data",
+        i."index" as "input_index"
+    FROM
+        "report" r
+    INNER JOIN
+        "input" i on r."input_id"=i."id";
+
+CREATE OR REPLACE VIEW graphql."claims" AS
+    SELECT
+        c."index",
+        c."output_merkle_root_hash",
+        c."status",
+        c."application_address",
+        o."index" as "output_index"
+    FROM
+        "claim" c
+    INNER JOIN
+        "application" a ON c."application_address"=a."contract_address"
+    INNER JOIN
+        "input" i ON a."contract_address"=i."application_address"
+    INNER JOIN
+        "output" o ON i."id"=o."input_id";
+
+COMMENT ON VIEW graphql."inputs" is
+    E'@foreignKey (application_address) references applications(contract_address)|@fieldName applicationByApplicationAddress';
+
+COMMENT ON VIEW graphql."outputs" is
+    E'@foreignKey (input_index) references inputs(index)|@fieldName inputByInputIndex';
+
+COMMENT ON VIEW graphql."reports" is
+    E'@foreignKey (input_index) references inputs(index)|@fieldName inputByInputIndex';
+
+COMMENT ON VIEW graphql."claims" is
+    E'@foreignKey (output_index) references outputs(index)|@fieldName outputByOutputIndex\n@foreignKey (application_address) references applications(contract_address)|@fieldName applicationByApplicationAddress';
\ No newline at end of file

From 6510ffe5542931e67c01ddbffc37f539b920832a Mon Sep 17 00:00:00 2001
From: Francisco Moura
Date: Thu, 11 Jul 2024 16:14:47 -0300
Subject: [PATCH 31/34] fix(tests): update port usage for testcontainers

---
 go.mod                |  4 +--
 internal/deps/deps.go | 62 +++++++++++++++++++++++++++++++++++++------
 2 files changed, 56 insertions(+), 10 deletions(-)

diff --git a/go.mod b/go.mod
index 60f9ed4d9..31ad19890 100644
--- a/go.mod
+++ b/go.mod
@@ -16,6 +16,8 @@ require github.com/BurntSushi/toml v1.4.0

 require (
 	github.com/Khan/genqlient v0.7.0
 	github.com/deepmap/oapi-codegen/v2 v2.1.0
+	github.com/docker/docker v27.0.3+incompatible
+	github.com/docker/go-connections v0.5.0
 	github.com/golang-migrate/migrate/v4 v4.17.1
 	github.com/jackc/pgx/v5 v5.6.0
 	github.com/lmittmann/tint v1.0.4
@@ -52,8 +54,6 @@ require (
 	github.com/deckarep/golang-set/v2 v2.6.0 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.3.0 // indirect
 	github.com/distribution/reference v0.6.0 // indirect
-	github.com/docker/docker v27.0.3+incompatible // indirect
-	github.com/docker/go-connections v0.5.0 // indirect
 	github.com/docker/go-units v0.5.0 // indirect
 	github.com/ethereum/c-kzg-4844 v1.0.2 // indirect
 	github.com/felixge/httpsnoop v1.0.4 // indirect
diff --git a/internal/deps/deps.go b/internal/deps/deps.go
index 7742fb830..b985d2a36 100644
--- a/internal/deps/deps.go
+++ b/internal/deps/deps.go
@@ -13,6 +13,8 @@ import (
 	"sync"
 	"time"

+	"github.com/docker/docker/api/types/container"
+	"github.com/docker/go-connections/nat"
 	"github.com/testcontainers/testcontainers-go"
 	"github.com/testcontainers/testcontainers-go/wait"
 )
@@ -38,6 +40,11 @@ const (
 	devnetKey
 )

+const (
+	postgresContainerPort = "5432/tcp"
+	devnetContainerPort   = "8545/tcp"
+)
+
 // Struct to hold Node dependencies containers configurations
 type DepsConfig struct {
 	Postgres *PostgresConfig
@@ -159,6 +166,24 @@ func createHook(finishedWaitGroup *sync.WaitGroup) []testcontainers.ContainerLif
 	}
 }

+func buildPortMap(portSpec string) (nat.PortMap, error) {
+	portMappings, err := nat.ParsePortSpec(portSpec)
+	if err != nil {
+		return nil, err
+	}
+
+	portMap := nat.PortMap{}
+	for _, portMapping := range portMappings {
+		portMap[portMapping.Port] = append(
+			portMap[portMapping.Port],
+			nat.PortBinding{
+				HostIP:   portMapping.Binding.HostIP,
+				HostPort: portMapping.Binding.HostPort,
+			})
+	}
+	return portMap, nil
+}
+
 // Run starts the Node dependencies containers.
 // The returned DepContainers struct can be used to gracefully
 // terminate the containers using the Terminate method
@@ -173,20 +198,30 @@ func Run(ctx context.Context, depsConfig DepsConfig) (*DepsContainers, error) {
 		WithOccurrence(numPostgresCheckReadyAttempts).
 		WithPollInterval(pollInterval)

-	postgresExposedPorts := "5432/tcp"
+	postgresPortSpec := postgresContainerPort
 	if depsConfig.Postgres.Port != "" {
-		postgresExposedPorts = strings.Join([]string{
-			depsConfig.Postgres.Port, ":", postgresExposedPorts}, "")
+		postgresPortSpec = strings.Join([]string{
+			depsConfig.Postgres.Port, ":", postgresPortSpec}, "")
+	}
+
+	portMap, err := buildPortMap(postgresPortSpec)
+	if err != nil {
+		return nil, err
 	}
+
 	postgresReq := testcontainers.ContainerRequest{
 		Image:        depsConfig.Postgres.DockerImage,
-		ExposedPorts: []string{postgresExposedPorts},
+		ExposedPorts: []string{postgresContainerPort},
 		WaitingFor:   postgresWaitStrategy,
 		Env: map[string]string{
 			"POSTGRES_PASSWORD": depsConfig.Postgres.Password,
 		},
 		LifecycleHooks: createHook(&finishedWaitGroup),
+		HostConfigModifier: func(hostConfig *container.HostConfig) {
+			hostConfig.PortBindings = portMap
+		},
 	}
+
 	postgres, err := testcontainers.GenericContainer(
 		ctx,
 		testcontainers.GenericContainerRequest{
@@ -204,11 +239,18 @@ func Run(ctx context.Context, depsConfig DepsConfig) (*DepsContainers, error) {

 	if depsConfig.Devnet != nil {
-		devnetExposedPort := "8545/tcp"
+		devnetPortSpec := devnetContainerPort
 		if depsConfig.Devnet.Port != "" {
-			devnetExposedPort = strings.Join([]string{
-				depsConfig.Devnet.Port, ":", devnetExposedPort}, "")
+			devnetPortSpec = strings.Join([]string{
+				depsConfig.Devnet.Port, ":", devnetPortSpec}, "")
+		}
+
+		portMap, err := buildPortMap(devnetPortSpec)
+
+		if err != nil {
+			return nil, err
 		}
+
 		cmd := []string{
 			"anvil",
 			"--load-state",
@@ -225,11 +267,15 @@ func Run(ctx context.Context, depsConfig DepsConfig) (*DepsContainers, error) {
 		}
 		devNetReq := testcontainers.ContainerRequest{
 			Image:          depsConfig.Devnet.DockerImage,
-			ExposedPorts:   []string{devnetExposedPort},
+			ExposedPorts:   []string{devnetContainerPort},
 			WaitingFor:     waitStrategy,
 			Cmd:            cmd,
 			LifecycleHooks: createHook(&finishedWaitGroup),
+			HostConfigModifier: func(hostConfig *container.HostConfig) {
+				hostConfig.PortBindings = portMap
+			},
 		}
+
 		devnet, err := testcontainers.GenericContainer(ctx,
 			testcontainers.GenericContainerRequest{
 				ContainerRequest: devNetReq,
 				Started:          true,

From 841a401d85b6816cf3cb73802d6c1ab55766424b Mon Sep 17 00:00:00 2001
From: Francisco Moura
Date: Thu, 11 Jul 2024 16:16:42 -0300
Subject: [PATCH 32/34] ci: disable reaper to avoid failure on testcontainers

This issue seems to be related to
https://github.com/testcontainers/testcontainers-go/issues/2172.
---
 .github/workflows/build.yml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index ba1b16077..a92963a60 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -211,6 +211,8 @@ jobs:
           version: v1.58.2

       - name: Run Go tests
+        env:
+          TESTCONTAINERS_RYUK_DISABLED: true
         run: go test ./...
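For context on the port handling above: the `buildPortMap` helper turns a `host:container` port spec into the `nat.PortMap` that `HostConfigModifier` installs as explicit port bindings. A standalone sketch of the same transformation, using only `github.com/docker/go-connections/nat` (the `"5433:5432/tcp"` spec is an arbitrary example, not a value from the patch):

```go
package main

import (
	"fmt"

	"github.com/docker/go-connections/nat"
)

func main() {
	// Parse a "hostPort:containerPort/proto" spec into one or more mappings.
	mappings, err := nat.ParsePortSpec("5433:5432/tcp")
	if err != nil {
		panic(err)
	}

	// Group the bindings by container port, as buildPortMap does.
	portMap := nat.PortMap{}
	for _, m := range mappings {
		portMap[m.Port] = append(portMap[m.Port], nat.PortBinding{
			HostIP:   m.Binding.HostIP,
			HostPort: m.Binding.HostPort,
		})
	}

	// Prints roughly: map[5432/tcp:[{ 5433}]] — container port 5432 bound to host port 5433.
	fmt.Println(portMap)
}
```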
   build-docker:

From eddf0a06dbceee66648f0a94719fa739013123f7 Mon Sep 17 00:00:00 2001
From: Francisco Moura
Date: Thu, 11 Jul 2024 16:17:22 -0300
Subject: [PATCH 33/34] fix(tests): mine blocks while adding inputs

---
 pkg/ethutil/ethutil_test.go | 72 +++++++++++++++++++++++++------------
 1 file changed, 50 insertions(+), 22 deletions(-)

diff --git a/pkg/ethutil/ethutil_test.go b/pkg/ethutil/ethutil_test.go
index 84f125e20..a1dad4cf3 100644
--- a/pkg/ethutil/ethutil_test.go
+++ b/pkg/ethutil/ethutil_test.go
@@ -6,6 +6,7 @@ package ethutil

 import (
 	"context"
 	"io"
+	"sync"
 	"testing"
 	"time"
@@ -19,6 +20,7 @@ import (
 )

 const testTimeout = 300 * time.Second
+const inputBoxDeploymentBlockNumber = 0x10

 // This suite sets up a container running a devnet Ethereum node, and connects to it using
 // go-ethereum's client.
@@ -59,35 +61,62 @@ func (s *EthUtilSuite) TearDownTest() {
 }

 func (s *EthUtilSuite) TestAddInput() {
-	sender := common.HexToAddress("0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266")
+
+	signer, err := NewMnemonicSigner(s.ctx, s.client, FoundryMnemonic, 0)
+	s.Require().Nil(err)
+
+	sender := signer.Account()
 	payload := common.Hex2Bytes("deadbeef")

-	inputIndex, err := AddInput(s.ctx, s.client, s.book, s.signer, payload)
-	if !s.Nil(err) {
-		s.logDevnetOutput()
-		s.T().FailNow()
-	}
-	s.Require().Equal(0, inputIndex)
+	indexChan := make(chan int)
+	errChan := make(chan error)

-	event, err := GetInputFromInputBox(s.client, s.book, inputIndex)
-	s.Require().Nil(err)
+	waitGroup := sync.WaitGroup{}
+	waitGroup.Add(1)

-	inputsABI, err := inputs.InputsMetaData.GetAbi()
-	s.Require().Nil(err)
-	advanceInputABI := inputsABI.Methods["EvmAdvance"]
-	inputArgs := map[string]interface{}{}
-	err = advanceInputABI.Inputs.UnpackIntoMap(inputArgs, event.Input[4:])
+	go func() {
+		waitGroup.Done()
+		inputIndex, err := AddInput(s.ctx, s.client, s.book, s.signer, payload)
+		if err != nil {
+			errChan <- err
+			return
+		}
+		indexChan <- inputIndex
+	}()
+
+	waitGroup.Wait()
+	time.Sleep(1 * time.Second)
+	blockNumber, err := MineNewBlock(s.ctx, s.endpoint)
 	s.Require().Nil(err)
+	s.Require().Equal(uint64(inputBoxDeploymentBlockNumber+1), blockNumber)

-	s.T().Log(inputArgs)
-	s.Require().Equal(sender, inputArgs["msgSender"])
-	s.Require().Equal(payload, inputArgs["payload"])
+	select {
+	case err := <-errChan:
+		s.logDevnetOutput()
+		s.Require().FailNow("Unexpected Error", err)
+	case inputIndex := <-indexChan:
+		s.Require().Equal(0, inputIndex)
+
+		event, err := GetInputFromInputBox(s.client, s.book, inputIndex)
+		s.Require().Nil(err)
+
+		inputsABI, err := inputs.InputsMetaData.GetAbi()
+		s.Require().Nil(err)
+		advanceInputABI := inputsABI.Methods["EvmAdvance"]
+		inputArgs := map[string]interface{}{}
+		err = advanceInputABI.Inputs.UnpackIntoMap(inputArgs, event.Input[4:])
+		s.Require().Nil(err)
+
+		s.T().Log(inputArgs)
+		s.Require().Equal(sender, inputArgs["msgSender"])
+		s.Require().Equal(payload, inputArgs["payload"])
+	}
 }

 func (s *EthUtilSuite) TestMineNewBlock() {
 	blockNumber, err := MineNewBlock(s.ctx, s.endpoint)
 	s.Require().Nil(err)
-	s.Require().Equal(uint64(22), blockNumber)
+	s.Require().Equal(uint64(inputBoxDeploymentBlockNumber+1), blockNumber)
 }
@@ -112,10 +141,9 @@ func newDevNetContainer(ctx context.Context) (*deps.DepsContainers, error) {
 	container, err := deps.Run(ctx, deps.DepsConfig{
 		Devnet: &deps.DevnetConfig{
-			DockerImage:             deps.DefaultDevnetDockerImage,
-			BlockTime:               deps.DefaultDevnetBlockTime,
-			BlockToWaitForOnStartup: deps.DefaultDevnetBlockToWaitForOnStartup,
-			Port:                    testutil.GetCartesiTestDepsPortRange(),
+			DockerImage: deps.DefaultDevnetDockerImage,
+			NoMining:    true,
+			Port:        testutil.GetCartesiTestDepsPortRange(),
 		},
 	})
 	if err != nil {

From 0f9100b6e5eb062619030d3c23edb3e49c362c6d Mon Sep 17 00:00:00 2001
From: Danilo Tuler
Date: Wed, 6 Dec 2023 11:20:59 -0300
Subject: [PATCH 34/34] feat: openapi for application management

---
 api/openapi/management.yaml | 246 ++++++++++++++++++++++++++++++++++++
 1 file changed, 246 insertions(+)
 create mode 100644 api/openapi/management.yaml

diff --git a/api/openapi/management.yaml b/api/openapi/management.yaml
new file mode 100644
index 000000000..5920126e9
--- /dev/null
+++ b/api/openapi/management.yaml
@@ -0,0 +1,246 @@
+openapi: 3.0.0
+info:
+  title: Cartesi Rollups Node Management API
+  description: |-
+    This is a management API for the Cartesi Rollups Node. It allows
+    dynamically adding and removing applications managed by the node, and
+    querying the list of managed applications.
+  license:
+    name: Apache 2.0
+    url: http://www.apache.org/licenses/LICENSE-2.0.html
+  version: 1.0.0
+tags:
+  - name: applications
+    description: Cartesi applications
+paths:
+  /applications:
+    get:
+      tags:
+        - applications
+      summary: Fetches the list of applications managed by the node
+      description: Returns the list of managed applications
+      operationId: getApplications
+      parameters:
+        - name: status
+          in: query
+          description: Filter by application status
+          required: false
+          schema:
+            $ref: "#/components/schemas/ApplicationStatus"
+        - name: offset
+          in: query
+          description: Items to skip for pagination
+          required: false
+          schema:
+            type: integer
+            minimum: 0
+            default: 0
+        - name: limit
+          in: query
+          description: Maximum number of items per page
+          required: false
+          schema:
+            type: integer
+            minimum: 1
+            default: 100
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                allOf:
+                  - $ref: "#/components/schemas/PaginatedResult"
+                  - type: object
+                    properties:
+                      data:
+                        type: array
+                        items:
+                          $ref: "#/components/schemas/Application"
+                    required:
+                      - data
+  /applications/{address}:
+    get:
+      tags:
+        - applications
+      summary: Find application by address
+      description: Returns a single application by its address
+      operationId: getApplicationByAddress
+      parameters:
+        - name: address
+          in: path
+          description: Address of application to return
+          required: true
+          schema:
+            $ref: "#/components/schemas/Address"
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Application"
+        "400":
+          $ref: "#/components/responses/BadRequest"
+        "404":
+          $ref: "#/components/responses/NotFound"
+    put:
+      tags:
+        - applications
+      summary: Adds an application
+      description: Adds an application by its address
+      operationId: addApplication
+      parameters:
+        - name: address
+          in: path
+          description: Application address
+          required: true
+          schema:
+            $ref: "#/components/schemas/Address"
+      requestBody:
+        $ref: "#/components/requestBodies/Application"
+      responses:
+        "200":
+          description: Success
+          content:
+            application/json:
+              schema:
+                $ref: "#/components/schemas/Application"
+        "400":
+          $ref: "#/components/responses/BadRequest"
+        "409":
+          $ref: "#/components/responses/Conflict"
+    delete:
+      tags:
+        - applications
+      summary: Deletes an application
+      description: Deletes an application by its address
+      operationId: deleteApplication
+      parameters:
+        - name: address
+          in: path
+          description: Application address to delete
+          required: true
+          schema:
+            $ref: "#/components/schemas/Address"
+      responses:
+        "202":
+          description: Delete request accepted
+        "204":
+          description: Delete successful
+        "400":
+          $ref: "#/components/responses/BadRequest"
+        "404":
+          $ref: "#/components/responses/NotFound"
+components:
+  schemas:
+    PaginatedResult:
+      type: object
+      properties:
+        total_count:
+          type: integer
+        offset:
+          type: integer
+        limit:
+          type: integer
+        data:
+          type: array
+          items: {}
+      required:
+        - total_count
+        - offset
+        - limit
+        - data
+    Address:
+      type: string
+      format: address
+      example: "0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266"
+      pattern: "^0x([0-9a-fA-F]{40})$"
+    Hash:
+      type: string
+      format: hex
+      pattern: "^0x([0-9a-fA-F]{64})$"
+      example: "0x9a8d6ce861c71b592c236013b5c3fa167501431def4ad86ff59a3b12aa331dfc"
+    ApplicationStatus:
+      type: string
+      description: Application status
+      enum:
+        - downloading
+        - started
+        - starting
+        - stopping
+        - error
+    Application:
+      required:
+        - address
+        - blockNumber
+        - templateHash
+        - snapshotUri
+        - status
+      type: object
+      properties:
+        address:
+          $ref: "#/components/schemas/Address"
+        blockNumber:
+          type: integer
+          example: 456311
+        templateHash:
+          $ref: "#/components/schemas/Hash"
+        snapshotUri:
+          type: string
+          format: uri
+        status:
+          $ref: "#/components/schemas/ApplicationStatus"
+        error:
+          type: string
+    Error:
+      type: object
+      properties:
+        error:
+          type: string
+        message:
+          type: string
+        statusCode:
+          type: integer
+      required:
+        - error
+        - message
+        - statusCode
+  responses:
+    BadRequest:
+      description: Bad request
+      content:
+        application/json:
+          schema:
+            $ref: "#/components/schemas/Error"
+    NotFound:
+      description: The specified resource was not found
+      content:
+        application/json:
+          schema:
+            $ref: "#/components/schemas/Error"
+    Conflict:
+      description: Request conflict with the current state of the target resource
+      content:
+        application/json:
+          schema:
+            $ref: "#/components/schemas/Error"
+  requestBodies:
+    Application:
+      description: Application to be added to the node
+      required: true
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              blockNumber:
+                type: integer
+                example: 456311
+              templateHash:
+                $ref: "#/components/schemas/Hash"
+              snapshotUri:
+                type: string
+                format: uri
+            required:
+              - blockNumber
+              - templateHash
+              - snapshotUri
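To illustrate how a client might exercise this spec, here is a minimal sketch in Go that registers an application with `PUT /applications/{address}`. The base URL is an assumption (the spec declares no `servers` section), the address and template hash reuse the spec's own examples, and the snapshot URI is a placeholder.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Hypothetical management endpoint; the real host/port depend on deployment.
	const baseURL = "http://localhost:10010"

	// Request body matching the spec's requestBodies/Application schema.
	app := map[string]any{
		"blockNumber":  456311,
		"templateHash": "0x9a8d6ce861c71b592c236013b5c3fa167501431def4ad86ff59a3b12aa331dfc",
		"snapshotUri":  "file:///var/lib/cartesi/snapshot", // placeholder
	}
	body, _ := json.Marshal(app)

	req, err := http.NewRequest(http.MethodPut,
		baseURL+"/applications/0xf39Fd6e51aad88F6F4ce6aB8827279cffFb92266",
		bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Per the spec: 200 on success, 400 on a malformed address, 409 if already registered.
	fmt.Println(resp.Status)
}
```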