From 152a822c7f48c341816d2323f66d8978de3c432c Mon Sep 17 00:00:00 2001 From: Artur Troian Date: Fri, 3 May 2024 13:56:25 -0400 Subject: [PATCH] feat: clean v1beta3 Signed-off-by: Artur Troian --- docs/proto/node.md | 5675 ++++++++++++----- docs/proto/provider.md | 212 + go/manifest/v2beta3/errors.go | 10 + go/manifest/v2beta3/group.go | 79 + go/manifest/v2beta3/group.pb.go | 399 ++ go/manifest/v2beta3/groups.go | 88 + go/manifest/v2beta3/helpers.go | 98 + go/manifest/v2beta3/httpoptions.pb.go | 536 ++ go/manifest/v2beta3/manifest.go | 67 + .../v2beta3/manifest_cross_validation_test.go | 196 + go/manifest/v2beta3/manifest_test.go | 469 ++ go/manifest/v2beta3/parse.go | 65 + go/manifest/v2beta3/service.go | 108 + go/manifest/v2beta3/service.pb.go | 1680 +++++ go/manifest/v2beta3/service_expose_test.go | 45 + go/manifest/v2beta3/serviceexpose.go | 108 + go/manifest/v2beta3/serviceexpose.pb.go | 666 ++ go/manifest/v2beta3/serviceexposes.go | 51 + go/manifest/v2beta3/services.go | 21 + go/node/audit/v1beta3/errors.go | 14 +- go/node/audit/v1beta4/audit.pb.go | 2080 ++++++ go/node/audit/v1beta4/codec.go | 43 + go/node/audit/v1beta4/errors.go | 22 + go/node/audit/v1beta4/event.go | 118 + go/node/audit/v1beta4/genesis.pb.go | 332 + go/node/audit/v1beta4/key.go | 16 + go/node/audit/v1beta4/msgs.go | 94 + go/node/audit/v1beta4/query.pb.go | 1718 +++++ go/node/audit/v1beta4/query.pb.gw.go | 532 ++ go/node/audit/v1beta4/types.go | 33 + go/node/client/client.go | 14 +- go/node/client/testutil/v1beta3/base.go | 121 + go/node/client/testutil/v1beta3/cert.go | 211 + .../client/testutil/v1beta3/channel_wait.go | 71 + go/node/client/testutil/v1beta3/deployment.go | 61 + go/node/client/testutil/v1beta3/ids.go | 102 + go/node/client/testutil/v1beta3/log.go | 27 + go/node/client/testutil/v1beta3/sdk.go | 43 + go/node/client/testutil/v1beta3/types.go | 23 + go/node/client/v1beta3/client.go | 143 + go/node/client/v1beta3/errors.go | 11 + go/node/client/v1beta3/mocks/client.go | 269 + 
go/node/client/v1beta3/mocks/node_client.go | 95 + go/node/client/v1beta3/mocks/query_client.go | 1866 ++++++ go/node/client/v1beta3/mocks/tx_client.go | 113 + go/node/client/v1beta3/node.go | 34 + go/node/client/v1beta3/options.go | 7 + go/node/client/v1beta3/query.go | 277 + go/node/client/v1beta3/tx.go | 650 ++ go/node/deployment/v1beta3/errors.go | 74 +- go/node/deployment/v1beta4/authz.pb.go | 333 + go/node/deployment/v1beta4/codec.go | 58 + go/node/deployment/v1beta4/deployment.pb.go | 960 +++ .../v1beta4/deployment_validation_test.go | 202 + .../deployment/v1beta4/deploymentmsg.pb.go | 1722 +++++ .../deposit_deployment_authorization.go | 45 + go/node/deployment/v1beta4/errors.go | 81 + go/node/deployment/v1beta4/escrow.go | 25 + go/node/deployment/v1beta4/event.go | 309 + go/node/deployment/v1beta4/events_test.go | 336 + go/node/deployment/v1beta4/genesis.pb.go | 630 ++ go/node/deployment/v1beta4/group.pb.go | 505 ++ .../deployment/v1beta4/group_validation.go | 35 + go/node/deployment/v1beta4/groupid.pb.go | 395 ++ go/node/deployment/v1beta4/groupmsg.pb.go | 1034 +++ go/node/deployment/v1beta4/groupspec.go | 196 + go/node/deployment/v1beta4/groupspec.pb.go | 427 ++ go/node/deployment/v1beta4/id.go | 103 + go/node/deployment/v1beta4/key.go | 20 + go/node/deployment/v1beta4/migrate/v1beta3.go | 51 + go/node/deployment/v1beta4/msgs.go | 333 + go/node/deployment/v1beta4/msgs_test.go | 83 + go/node/deployment/v1beta4/params.go | 91 + go/node/deployment/v1beta4/params.pb.go | 340 + go/node/deployment/v1beta4/query.pb.go | 1628 +++++ go/node/deployment/v1beta4/query.pb.gw.go | 337 + .../v1beta4/resource_list_validation.go | 187 + .../v1beta4/resource_list_validation_test.go | 280 + go/node/deployment/v1beta4/resourcelimits.go | 38 + go/node/deployment/v1beta4/resourceunit.go | 166 + go/node/deployment/v1beta4/resourceunit.pb.go | 445 ++ go/node/deployment/v1beta4/resourceunits.go | 82 + go/node/deployment/v1beta4/service.pb.go | 365 ++ 
go/node/deployment/v1beta4/types.go | 122 + go/node/deployment/v1beta4/types_test.go | 461 ++ .../deployment/v1beta4/validation_config.go | 118 + go/node/market/v1beta4/errors.go | 101 +- go/node/market/v1beta5/bid.go | 82 + go/node/market/v1beta5/bid.pb.go | 2338 +++++++ go/node/market/v1beta5/bid_test.go | 48 + go/node/market/v1beta5/codec.go | 50 + go/node/market/v1beta5/errors.go | 107 + go/node/market/v1beta5/escrow.go | 60 + go/node/market/v1beta5/event.go | 359 ++ go/node/market/v1beta5/events_test.go | 459 ++ go/node/market/v1beta5/genesis.pb.go | 518 ++ go/node/market/v1beta5/id.go | 154 + go/node/market/v1beta5/key.go | 28 + go/node/market/v1beta5/lease.pb.go | 2134 +++++++ go/node/market/v1beta5/migrate/v1beta4.go | 40 + go/node/market/v1beta5/msgs.go | 217 + go/node/market/v1beta5/order.pb.go | 1107 ++++ go/node/market/v1beta5/params.go | 77 + go/node/market/v1beta5/params.pb.go | 365 ++ go/node/market/v1beta5/query.pb.go | 3035 +++++++++ go/node/market/v1beta5/query.pb.gw.go | 586 ++ go/node/market/v1beta5/service.pb.go | 287 + go/node/market/v1beta5/types.go | 244 + go/node/provider/v1beta3/errors.go | 29 +- go/node/provider/v1beta4/codec.go | 45 + go/node/provider/v1beta4/errors.go | 42 + go/node/provider/v1beta4/event.go | 142 + go/node/provider/v1beta4/events_test.go | 195 + go/node/provider/v1beta4/genesis.pb.go | 334 + go/node/provider/v1beta4/key.go | 12 + go/node/provider/v1beta4/migrate/v1beta3.go | 23 + go/node/provider/v1beta4/msgs.go | 177 + go/node/provider/v1beta4/msgs_test.go | 255 + go/node/provider/v1beta4/provider.pb.go | 2102 ++++++ go/node/provider/v1beta4/query.pb.go | 1059 +++ go/node/provider/v1beta4/query.pb.gw.go | 272 + go/node/provider/v1beta4/types.go | 67 + go/node/types/attributes/v1/attribute.go | 381 ++ go/node/types/attributes/v1/attribute.pb.go | 812 +++ go/node/types/attributes/v1/attribute_test.go | 182 + .../types/attributes/v1/migrate/v1beta3.go | 35 + go/node/types/resources/v1/cpu.pb.go | 424 ++ 
go/node/types/resources/v1/endpoint.go | 29 + go/node/types/resources/v1/endpoint.pb.go | 408 ++ go/node/types/resources/v1/gpu.pb.go | 424 ++ go/node/types/resources/v1/memory.pb.go | 428 ++ go/node/types/resources/v1/migrate/v1beta3.go | 82 + go/node/types/resources/v1/requirements.go | 15 + go/node/types/resources/v1/resources.go | 194 + go/node/types/resources/v1/resources.pb.go | 677 ++ go/node/types/resources/v1/resources_test.go | 143 + go/node/types/resources/v1/resourcevalue.go | 57 + .../types/resources/v1/resourcevalue.pb.go | 343 + go/node/types/resources/v1/storage.pb.go | 483 ++ go/node/types/v1beta3/attribute.go | 11 +- proto/node/akash/audit/v1beta4/audit.proto | 124 + proto/node/akash/audit/v1beta4/genesis.proto | 17 + proto/node/akash/audit/v1beta4/query.proto | 82 + .../akash/base/attributes/v1/attribute.proto | 54 + proto/node/akash/base/resources/v1/cpu.proto | 23 + .../akash/base/resources/v1/endpoint.proto | 28 + proto/node/akash/base/resources/v1/gpu.proto | 22 + .../node/akash/base/resources/v1/memory.proto | 25 + .../akash/base/resources/v1/resources.proto | 52 + .../base/resources/v1/resourcevalue.proto | 15 + .../akash/base/resources/v1/storage.proto | 28 + .../node/akash/deployment/v1beta4/authz.proto | 21 + .../akash/deployment/v1beta4/deployment.proto | 75 + .../deployment/v1beta4/deploymentmsg.proto | 106 + .../akash/deployment/v1beta4/genesis.proto | 39 + .../node/akash/deployment/v1beta4/group.proto | 58 + .../akash/deployment/v1beta4/groupid.proto | 27 + .../akash/deployment/v1beta4/groupmsg.proto | 52 + .../akash/deployment/v1beta4/groupspec.proto | 32 + .../akash/deployment/v1beta4/params.proto | 19 + .../node/akash/deployment/v1beta4/query.proto | 90 + .../deployment/v1beta4/resourceunit.proto | 29 + .../akash/deployment/v1beta4/service.proto | 31 + proto/node/akash/market/v1beta5/bid.proto | 199 + proto/node/akash/market/v1beta5/genesis.proto | 35 + proto/node/akash/market/v1beta5/lease.proto | 166 + 
proto/node/akash/market/v1beta5/order.proto | 109 + proto/node/akash/market/v1beta5/params.proto | 22 + proto/node/akash/market/v1beta5/query.proto | 151 + proto/node/akash/market/v1beta5/service.proto | 25 + .../node/akash/provider/v1beta4/genesis.proto | 16 + .../akash/provider/v1beta4/provider.proto | 120 + proto/node/akash/provider/v1beta4/query.proto | 49 + .../akash/manifest/v2beta3/group.proto | 27 + .../akash/manifest/v2beta3/httpoptions.proto | 38 + .../akash/manifest/v2beta3/service.proto | 106 + .../manifest/v2beta3/serviceexpose.proto | 60 + script/shellcheck.sh | 2 +- ts/src/generated/akash/audit/v1beta4/audit.ts | 880 +++ .../generated/akash/audit/v1beta4/genesis.ts | 112 + ts/src/generated/akash/audit/v1beta4/query.ts | 765 +++ .../akash/base/attributes/v1/attribute.ts | 334 + .../generated/akash/base/resources/v1/cpu.ts | 135 + .../akash/base/resources/v1/endpoint.ts | 173 + .../generated/akash/base/resources/v1/gpu.ts | 135 + .../akash/base/resources/v1/memory.ts | 138 + .../akash/base/resources/v1/resources.ts | 218 + .../akash/base/resources/v1/resourcevalue.ts | 136 + .../akash/base/resources/v1/storage.ts | 155 + .../akash/deployment/v1beta4/authz.ts | 134 + .../akash/deployment/v1beta4/deployment.ts | 456 ++ .../akash/deployment/v1beta4/deploymentmsg.ts | 788 +++ .../akash/deployment/v1beta4/genesis.ts | 242 + .../akash/deployment/v1beta4/group.ts | 233 + .../akash/deployment/v1beta4/groupid.ts | 148 + .../akash/deployment/v1beta4/groupmsg.ts | 443 ++ .../akash/deployment/v1beta4/groupspec.ts | 161 + .../akash/deployment/v1beta4/params.ts | 108 + .../akash/deployment/v1beta4/query.ts | 706 ++ .../akash/deployment/v1beta4/resourceunit.ts | 155 + .../deployment/v1beta4/service.grpc-js.ts | 348 + .../akash/deployment/v1beta4/service.ts | 134 + .../generated/akash/manifest/v2beta3/group.ts | 125 + .../akash/manifest/v2beta3/httpoptions.ts | 218 + .../akash/manifest/v2beta3/service.grpc-js.ts | 623 ++ .../akash/manifest/v2beta3/service.ts | 621 ++ 
.../akash/manifest/v2beta3/serviceexpose.ts | 269 + ts/src/generated/akash/market/v1beta5/bid.ts | 1030 +++ .../generated/akash/market/v1beta5/genesis.ts | 175 + .../generated/akash/market/v1beta5/lease.ts | 980 +++ .../generated/akash/market/v1beta5/order.ts | 502 ++ .../generated/akash/market/v1beta5/params.ts | 136 + .../generated/akash/market/v1beta5/query.ts | 1275 ++++ .../akash/market/v1beta5/service.grpc-js.ts | 252 + .../generated/akash/market/v1beta5/service.ts | 92 + .../akash/provider/v1beta4/genesis.ts | 108 + .../akash/provider/v1beta4/provider.ts | 869 +++ .../generated/akash/provider/v1beta4/query.ts | 443 ++ ts/src/generated/index.akash.audit.ts | 2 +- ts/src/generated/index.akash.audit.v1beta4.ts | 5 + .../generated/index.akash.base.attributes.ts | 3 + .../index.akash.base.attributes.v1.ts | 3 + .../generated/index.akash.base.resources.ts | 3 + .../index.akash.base.resources.v1.ts | 9 + .../index.akash.deployment.v1beta4.grpc-js.ts | 1 + .../index.akash.deployment.v1beta4.ts | 4 + .../index.akash.manifest.v2beta3.grpc-js.ts | 1 + .../generated/index.akash.manifest.v2beta3.ts | 6 + .../index.akash.market.v1beta5.grpc-js.ts | 1 + .../generated/index.akash.market.v1beta5.ts | 9 + .../generated/index.akash.provider.v1beta4.ts | 5 + 231 files changed, 69870 insertions(+), 1919 deletions(-) create mode 100644 go/manifest/v2beta3/errors.go create mode 100644 go/manifest/v2beta3/group.go create mode 100644 go/manifest/v2beta3/group.pb.go create mode 100644 go/manifest/v2beta3/groups.go create mode 100644 go/manifest/v2beta3/helpers.go create mode 100644 go/manifest/v2beta3/httpoptions.pb.go create mode 100644 go/manifest/v2beta3/manifest.go create mode 100644 go/manifest/v2beta3/manifest_cross_validation_test.go create mode 100644 go/manifest/v2beta3/manifest_test.go create mode 100644 go/manifest/v2beta3/parse.go create mode 100644 go/manifest/v2beta3/service.go create mode 100644 go/manifest/v2beta3/service.pb.go create mode 100644 
go/manifest/v2beta3/service_expose_test.go create mode 100644 go/manifest/v2beta3/serviceexpose.go create mode 100644 go/manifest/v2beta3/serviceexpose.pb.go create mode 100644 go/manifest/v2beta3/serviceexposes.go create mode 100644 go/manifest/v2beta3/services.go create mode 100644 go/node/audit/v1beta4/audit.pb.go create mode 100644 go/node/audit/v1beta4/codec.go create mode 100644 go/node/audit/v1beta4/errors.go create mode 100644 go/node/audit/v1beta4/event.go create mode 100644 go/node/audit/v1beta4/genesis.pb.go create mode 100644 go/node/audit/v1beta4/key.go create mode 100644 go/node/audit/v1beta4/msgs.go create mode 100644 go/node/audit/v1beta4/query.pb.go create mode 100644 go/node/audit/v1beta4/query.pb.gw.go create mode 100644 go/node/audit/v1beta4/types.go create mode 100644 go/node/client/testutil/v1beta3/base.go create mode 100644 go/node/client/testutil/v1beta3/cert.go create mode 100644 go/node/client/testutil/v1beta3/channel_wait.go create mode 100644 go/node/client/testutil/v1beta3/deployment.go create mode 100644 go/node/client/testutil/v1beta3/ids.go create mode 100644 go/node/client/testutil/v1beta3/log.go create mode 100644 go/node/client/testutil/v1beta3/sdk.go create mode 100644 go/node/client/testutil/v1beta3/types.go create mode 100644 go/node/client/v1beta3/client.go create mode 100644 go/node/client/v1beta3/errors.go create mode 100644 go/node/client/v1beta3/mocks/client.go create mode 100644 go/node/client/v1beta3/mocks/node_client.go create mode 100644 go/node/client/v1beta3/mocks/query_client.go create mode 100644 go/node/client/v1beta3/mocks/tx_client.go create mode 100644 go/node/client/v1beta3/node.go create mode 100644 go/node/client/v1beta3/options.go create mode 100644 go/node/client/v1beta3/query.go create mode 100644 go/node/client/v1beta3/tx.go create mode 100644 go/node/deployment/v1beta4/authz.pb.go create mode 100644 go/node/deployment/v1beta4/codec.go create mode 100644 go/node/deployment/v1beta4/deployment.pb.go create 
mode 100644 go/node/deployment/v1beta4/deployment_validation_test.go create mode 100644 go/node/deployment/v1beta4/deploymentmsg.pb.go create mode 100644 go/node/deployment/v1beta4/deposit_deployment_authorization.go create mode 100644 go/node/deployment/v1beta4/errors.go create mode 100644 go/node/deployment/v1beta4/escrow.go create mode 100644 go/node/deployment/v1beta4/event.go create mode 100644 go/node/deployment/v1beta4/events_test.go create mode 100644 go/node/deployment/v1beta4/genesis.pb.go create mode 100644 go/node/deployment/v1beta4/group.pb.go create mode 100644 go/node/deployment/v1beta4/group_validation.go create mode 100644 go/node/deployment/v1beta4/groupid.pb.go create mode 100644 go/node/deployment/v1beta4/groupmsg.pb.go create mode 100644 go/node/deployment/v1beta4/groupspec.go create mode 100644 go/node/deployment/v1beta4/groupspec.pb.go create mode 100644 go/node/deployment/v1beta4/id.go create mode 100644 go/node/deployment/v1beta4/key.go create mode 100644 go/node/deployment/v1beta4/migrate/v1beta3.go create mode 100644 go/node/deployment/v1beta4/msgs.go create mode 100644 go/node/deployment/v1beta4/msgs_test.go create mode 100644 go/node/deployment/v1beta4/params.go create mode 100644 go/node/deployment/v1beta4/params.pb.go create mode 100644 go/node/deployment/v1beta4/query.pb.go create mode 100644 go/node/deployment/v1beta4/query.pb.gw.go create mode 100644 go/node/deployment/v1beta4/resource_list_validation.go create mode 100644 go/node/deployment/v1beta4/resource_list_validation_test.go create mode 100644 go/node/deployment/v1beta4/resourcelimits.go create mode 100644 go/node/deployment/v1beta4/resourceunit.go create mode 100644 go/node/deployment/v1beta4/resourceunit.pb.go create mode 100644 go/node/deployment/v1beta4/resourceunits.go create mode 100644 go/node/deployment/v1beta4/service.pb.go create mode 100644 go/node/deployment/v1beta4/types.go create mode 100644 go/node/deployment/v1beta4/types_test.go create mode 100644 
go/node/deployment/v1beta4/validation_config.go create mode 100644 go/node/market/v1beta5/bid.go create mode 100644 go/node/market/v1beta5/bid.pb.go create mode 100644 go/node/market/v1beta5/bid_test.go create mode 100644 go/node/market/v1beta5/codec.go create mode 100644 go/node/market/v1beta5/errors.go create mode 100644 go/node/market/v1beta5/escrow.go create mode 100644 go/node/market/v1beta5/event.go create mode 100644 go/node/market/v1beta5/events_test.go create mode 100644 go/node/market/v1beta5/genesis.pb.go create mode 100644 go/node/market/v1beta5/id.go create mode 100644 go/node/market/v1beta5/key.go create mode 100644 go/node/market/v1beta5/lease.pb.go create mode 100644 go/node/market/v1beta5/migrate/v1beta4.go create mode 100644 go/node/market/v1beta5/msgs.go create mode 100644 go/node/market/v1beta5/order.pb.go create mode 100644 go/node/market/v1beta5/params.go create mode 100644 go/node/market/v1beta5/params.pb.go create mode 100644 go/node/market/v1beta5/query.pb.go create mode 100644 go/node/market/v1beta5/query.pb.gw.go create mode 100644 go/node/market/v1beta5/service.pb.go create mode 100644 go/node/market/v1beta5/types.go create mode 100644 go/node/provider/v1beta4/codec.go create mode 100644 go/node/provider/v1beta4/errors.go create mode 100644 go/node/provider/v1beta4/event.go create mode 100644 go/node/provider/v1beta4/events_test.go create mode 100644 go/node/provider/v1beta4/genesis.pb.go create mode 100644 go/node/provider/v1beta4/key.go create mode 100644 go/node/provider/v1beta4/migrate/v1beta3.go create mode 100644 go/node/provider/v1beta4/msgs.go create mode 100644 go/node/provider/v1beta4/msgs_test.go create mode 100644 go/node/provider/v1beta4/provider.pb.go create mode 100644 go/node/provider/v1beta4/query.pb.go create mode 100644 go/node/provider/v1beta4/query.pb.gw.go create mode 100644 go/node/provider/v1beta4/types.go create mode 100644 go/node/types/attributes/v1/attribute.go create mode 100644 
go/node/types/attributes/v1/attribute.pb.go create mode 100644 go/node/types/attributes/v1/attribute_test.go create mode 100644 go/node/types/attributes/v1/migrate/v1beta3.go create mode 100644 go/node/types/resources/v1/cpu.pb.go create mode 100644 go/node/types/resources/v1/endpoint.go create mode 100644 go/node/types/resources/v1/endpoint.pb.go create mode 100644 go/node/types/resources/v1/gpu.pb.go create mode 100644 go/node/types/resources/v1/memory.pb.go create mode 100644 go/node/types/resources/v1/migrate/v1beta3.go create mode 100644 go/node/types/resources/v1/requirements.go create mode 100644 go/node/types/resources/v1/resources.go create mode 100644 go/node/types/resources/v1/resources.pb.go create mode 100644 go/node/types/resources/v1/resources_test.go create mode 100644 go/node/types/resources/v1/resourcevalue.go create mode 100644 go/node/types/resources/v1/resourcevalue.pb.go create mode 100644 go/node/types/resources/v1/storage.pb.go create mode 100644 proto/node/akash/audit/v1beta4/audit.proto create mode 100644 proto/node/akash/audit/v1beta4/genesis.proto create mode 100644 proto/node/akash/audit/v1beta4/query.proto create mode 100644 proto/node/akash/base/attributes/v1/attribute.proto create mode 100644 proto/node/akash/base/resources/v1/cpu.proto create mode 100644 proto/node/akash/base/resources/v1/endpoint.proto create mode 100644 proto/node/akash/base/resources/v1/gpu.proto create mode 100644 proto/node/akash/base/resources/v1/memory.proto create mode 100644 proto/node/akash/base/resources/v1/resources.proto create mode 100644 proto/node/akash/base/resources/v1/resourcevalue.proto create mode 100644 proto/node/akash/base/resources/v1/storage.proto create mode 100644 proto/node/akash/deployment/v1beta4/authz.proto create mode 100644 proto/node/akash/deployment/v1beta4/deployment.proto create mode 100644 proto/node/akash/deployment/v1beta4/deploymentmsg.proto create mode 100644 proto/node/akash/deployment/v1beta4/genesis.proto create mode 
100644 proto/node/akash/deployment/v1beta4/group.proto create mode 100644 proto/node/akash/deployment/v1beta4/groupid.proto create mode 100644 proto/node/akash/deployment/v1beta4/groupmsg.proto create mode 100644 proto/node/akash/deployment/v1beta4/groupspec.proto create mode 100644 proto/node/akash/deployment/v1beta4/params.proto create mode 100644 proto/node/akash/deployment/v1beta4/query.proto create mode 100644 proto/node/akash/deployment/v1beta4/resourceunit.proto create mode 100644 proto/node/akash/deployment/v1beta4/service.proto create mode 100644 proto/node/akash/market/v1beta5/bid.proto create mode 100644 proto/node/akash/market/v1beta5/genesis.proto create mode 100644 proto/node/akash/market/v1beta5/lease.proto create mode 100644 proto/node/akash/market/v1beta5/order.proto create mode 100644 proto/node/akash/market/v1beta5/params.proto create mode 100644 proto/node/akash/market/v1beta5/query.proto create mode 100644 proto/node/akash/market/v1beta5/service.proto create mode 100644 proto/node/akash/provider/v1beta4/genesis.proto create mode 100644 proto/node/akash/provider/v1beta4/provider.proto create mode 100644 proto/node/akash/provider/v1beta4/query.proto create mode 100644 proto/provider/akash/manifest/v2beta3/group.proto create mode 100644 proto/provider/akash/manifest/v2beta3/httpoptions.proto create mode 100644 proto/provider/akash/manifest/v2beta3/service.proto create mode 100644 proto/provider/akash/manifest/v2beta3/serviceexpose.proto create mode 100644 ts/src/generated/akash/audit/v1beta4/audit.ts create mode 100644 ts/src/generated/akash/audit/v1beta4/genesis.ts create mode 100644 ts/src/generated/akash/audit/v1beta4/query.ts create mode 100644 ts/src/generated/akash/base/attributes/v1/attribute.ts create mode 100644 ts/src/generated/akash/base/resources/v1/cpu.ts create mode 100644 ts/src/generated/akash/base/resources/v1/endpoint.ts create mode 100644 ts/src/generated/akash/base/resources/v1/gpu.ts create mode 100644 
ts/src/generated/akash/base/resources/v1/memory.ts create mode 100644 ts/src/generated/akash/base/resources/v1/resources.ts create mode 100644 ts/src/generated/akash/base/resources/v1/resourcevalue.ts create mode 100644 ts/src/generated/akash/base/resources/v1/storage.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/authz.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/deployment.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/deploymentmsg.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/genesis.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/group.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/groupid.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/groupmsg.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/groupspec.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/params.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/query.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/resourceunit.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/service.grpc-js.ts create mode 100644 ts/src/generated/akash/deployment/v1beta4/service.ts create mode 100644 ts/src/generated/akash/manifest/v2beta3/group.ts create mode 100644 ts/src/generated/akash/manifest/v2beta3/httpoptions.ts create mode 100644 ts/src/generated/akash/manifest/v2beta3/service.grpc-js.ts create mode 100644 ts/src/generated/akash/manifest/v2beta3/service.ts create mode 100644 ts/src/generated/akash/manifest/v2beta3/serviceexpose.ts create mode 100644 ts/src/generated/akash/market/v1beta5/bid.ts create mode 100644 ts/src/generated/akash/market/v1beta5/genesis.ts create mode 100644 ts/src/generated/akash/market/v1beta5/lease.ts create mode 100644 ts/src/generated/akash/market/v1beta5/order.ts create mode 100644 ts/src/generated/akash/market/v1beta5/params.ts create mode 100644 ts/src/generated/akash/market/v1beta5/query.ts create 
mode 100644 ts/src/generated/akash/market/v1beta5/service.grpc-js.ts create mode 100644 ts/src/generated/akash/market/v1beta5/service.ts create mode 100644 ts/src/generated/akash/provider/v1beta4/genesis.ts create mode 100644 ts/src/generated/akash/provider/v1beta4/provider.ts create mode 100644 ts/src/generated/akash/provider/v1beta4/query.ts create mode 100644 ts/src/generated/index.akash.audit.v1beta4.ts create mode 100644 ts/src/generated/index.akash.base.attributes.ts create mode 100644 ts/src/generated/index.akash.base.attributes.v1.ts create mode 100644 ts/src/generated/index.akash.base.resources.ts create mode 100644 ts/src/generated/index.akash.base.resources.v1.ts create mode 100644 ts/src/generated/index.akash.deployment.v1beta4.grpc-js.ts create mode 100644 ts/src/generated/index.akash.deployment.v1beta4.ts create mode 100644 ts/src/generated/index.akash.manifest.v2beta3.grpc-js.ts create mode 100644 ts/src/generated/index.akash.manifest.v2beta3.ts create mode 100644 ts/src/generated/index.akash.market.v1beta5.grpc-js.ts create mode 100644 ts/src/generated/index.akash.market.v1beta5.ts create mode 100644 ts/src/generated/index.akash.provider.v1beta4.ts diff --git a/docs/proto/node.md b/docs/proto/node.md index 5614aea7..bbeedbc7 100644 --- a/docs/proto/node.md +++ b/docs/proto/node.md @@ -10,6 +10,29 @@ - [akash/discovery/v1/akash.proto](#akash/discovery/v1/akash.proto) - [Akash](#akash.discovery.v1.Akash) + - [akash/provider/v1beta4/query.proto](#akash/provider/v1beta4/query.proto) + - [QueryProviderRequest](#akash.provider.v1beta4.QueryProviderRequest) + - [QueryProviderResponse](#akash.provider.v1beta4.QueryProviderResponse) + - [QueryProvidersRequest](#akash.provider.v1beta4.QueryProvidersRequest) + - [QueryProvidersResponse](#akash.provider.v1beta4.QueryProvidersResponse) + + - [Query](#akash.provider.v1beta4.Query) + + - [akash/provider/v1beta4/genesis.proto](#akash/provider/v1beta4/genesis.proto) + - 
[GenesisState](#akash.provider.v1beta4.GenesisState) + + - [akash/provider/v1beta4/provider.proto](#akash/provider/v1beta4/provider.proto) + - [MsgCreateProvider](#akash.provider.v1beta4.MsgCreateProvider) + - [MsgCreateProviderResponse](#akash.provider.v1beta4.MsgCreateProviderResponse) + - [MsgDeleteProvider](#akash.provider.v1beta4.MsgDeleteProvider) + - [MsgDeleteProviderResponse](#akash.provider.v1beta4.MsgDeleteProviderResponse) + - [MsgUpdateProvider](#akash.provider.v1beta4.MsgUpdateProvider) + - [MsgUpdateProviderResponse](#akash.provider.v1beta4.MsgUpdateProviderResponse) + - [Provider](#akash.provider.v1beta4.Provider) + - [ProviderInfo](#akash.provider.v1beta4.ProviderInfo) + + - [Msg](#akash.provider.v1beta4.Msg) + - [akash/provider/v1beta3/query.proto](#akash/provider/v1beta3/query.proto) - [QueryProviderRequest](#akash.provider.v1beta3.QueryProviderRequest) - [QueryProviderResponse](#akash.provider.v1beta3.QueryProviderResponse) @@ -68,6 +91,31 @@ - [Msg](#akash.provider.v1beta1.Msg) + - [akash/audit/v1beta4/audit.proto](#akash/audit/v1beta4/audit.proto) + - [AttributesFilters](#akash.audit.v1beta4.AttributesFilters) + - [AttributesResponse](#akash.audit.v1beta4.AttributesResponse) + - [AuditedAttributes](#akash.audit.v1beta4.AuditedAttributes) + - [MsgDeleteProviderAttributes](#akash.audit.v1beta4.MsgDeleteProviderAttributes) + - [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta4.MsgDeleteProviderAttributesResponse) + - [MsgSignProviderAttributes](#akash.audit.v1beta4.MsgSignProviderAttributes) + - [MsgSignProviderAttributesResponse](#akash.audit.v1beta4.MsgSignProviderAttributesResponse) + - [Provider](#akash.audit.v1beta4.Provider) + + - [Msg](#akash.audit.v1beta4.Msg) + + - [akash/audit/v1beta4/query.proto](#akash/audit/v1beta4/query.proto) + - [QueryAllProvidersAttributesRequest](#akash.audit.v1beta4.QueryAllProvidersAttributesRequest) + - [QueryAuditorAttributesRequest](#akash.audit.v1beta4.QueryAuditorAttributesRequest) + - 
[QueryProviderAttributesRequest](#akash.audit.v1beta4.QueryProviderAttributesRequest) + - [QueryProviderAuditorRequest](#akash.audit.v1beta4.QueryProviderAuditorRequest) + - [QueryProviderRequest](#akash.audit.v1beta4.QueryProviderRequest) + - [QueryProvidersResponse](#akash.audit.v1beta4.QueryProvidersResponse) + + - [Query](#akash.audit.v1beta4.Query) + + - [akash/audit/v1beta4/genesis.proto](#akash/audit/v1beta4/genesis.proto) + - [GenesisState](#akash.audit.v1beta4.GenesisState) + - [akash/audit/v1beta3/audit.proto](#akash/audit/v1beta3/audit.proto) - [AttributesFilters](#akash.audit.v1beta3.AttributesFilters) - [AttributesResponse](#akash.audit.v1beta3.AttributesResponse) @@ -140,6 +188,68 @@ - [DenomTakeRate](#akash.take.v1beta3.DenomTakeRate) - [Params](#akash.take.v1beta3.Params) + - [akash/deployment/v1beta4/groupmsg.proto](#akash/deployment/v1beta4/groupmsg.proto) + - [MsgCloseGroup](#akash.deployment.v1beta4.MsgCloseGroup) + - [MsgCloseGroupResponse](#akash.deployment.v1beta4.MsgCloseGroupResponse) + - [MsgPauseGroup](#akash.deployment.v1beta4.MsgPauseGroup) + - [MsgPauseGroupResponse](#akash.deployment.v1beta4.MsgPauseGroupResponse) + - [MsgStartGroup](#akash.deployment.v1beta4.MsgStartGroup) + - [MsgStartGroupResponse](#akash.deployment.v1beta4.MsgStartGroupResponse) + + - [akash/deployment/v1beta4/resourceunit.proto](#akash/deployment/v1beta4/resourceunit.proto) + - [ResourceUnit](#akash.deployment.v1beta4.ResourceUnit) + + - [akash/deployment/v1beta4/group.proto](#akash/deployment/v1beta4/group.proto) + - [Group](#akash.deployment.v1beta4.Group) + + - [Group.State](#akash.deployment.v1beta4.Group.State) + + - [akash/deployment/v1beta4/groupid.proto](#akash/deployment/v1beta4/groupid.proto) + - [GroupID](#akash.deployment.v1beta4.GroupID) + + - [akash/deployment/v1beta4/deployment.proto](#akash/deployment/v1beta4/deployment.proto) + - [Deployment](#akash.deployment.v1beta4.Deployment) + - 
[DeploymentFilters](#akash.deployment.v1beta4.DeploymentFilters) + - [DeploymentID](#akash.deployment.v1beta4.DeploymentID) + + - [Deployment.State](#akash.deployment.v1beta4.Deployment.State) + + - [akash/deployment/v1beta4/query.proto](#akash/deployment/v1beta4/query.proto) + - [QueryDeploymentRequest](#akash.deployment.v1beta4.QueryDeploymentRequest) + - [QueryDeploymentResponse](#akash.deployment.v1beta4.QueryDeploymentResponse) + - [QueryDeploymentsRequest](#akash.deployment.v1beta4.QueryDeploymentsRequest) + - [QueryDeploymentsResponse](#akash.deployment.v1beta4.QueryDeploymentsResponse) + - [QueryGroupRequest](#akash.deployment.v1beta4.QueryGroupRequest) + - [QueryGroupResponse](#akash.deployment.v1beta4.QueryGroupResponse) + + - [Query](#akash.deployment.v1beta4.Query) + + - [akash/deployment/v1beta4/deploymentmsg.proto](#akash/deployment/v1beta4/deploymentmsg.proto) + - [MsgCloseDeployment](#akash.deployment.v1beta4.MsgCloseDeployment) + - [MsgCloseDeploymentResponse](#akash.deployment.v1beta4.MsgCloseDeploymentResponse) + - [MsgCreateDeployment](#akash.deployment.v1beta4.MsgCreateDeployment) + - [MsgCreateDeploymentResponse](#akash.deployment.v1beta4.MsgCreateDeploymentResponse) + - [MsgDepositDeployment](#akash.deployment.v1beta4.MsgDepositDeployment) + - [MsgDepositDeploymentResponse](#akash.deployment.v1beta4.MsgDepositDeploymentResponse) + - [MsgUpdateDeployment](#akash.deployment.v1beta4.MsgUpdateDeployment) + - [MsgUpdateDeploymentResponse](#akash.deployment.v1beta4.MsgUpdateDeploymentResponse) + + - [akash/deployment/v1beta4/service.proto](#akash/deployment/v1beta4/service.proto) + - [Msg](#akash.deployment.v1beta4.Msg) + + - [akash/deployment/v1beta4/authz.proto](#akash/deployment/v1beta4/authz.proto) + - [DepositDeploymentAuthorization](#akash.deployment.v1beta4.DepositDeploymentAuthorization) + + - [akash/deployment/v1beta4/genesis.proto](#akash/deployment/v1beta4/genesis.proto) + - 
[GenesisDeployment](#akash.deployment.v1beta4.GenesisDeployment) + - [GenesisState](#akash.deployment.v1beta4.GenesisState) + + - [akash/deployment/v1beta4/groupspec.proto](#akash/deployment/v1beta4/groupspec.proto) + - [GroupSpec](#akash.deployment.v1beta4.GroupSpec) + + - [akash/deployment/v1beta4/params.proto](#akash/deployment/v1beta4/params.proto) + - [Params](#akash.deployment.v1beta4.Params) + - [akash/deployment/v1beta3/groupmsg.proto](#akash/deployment/v1beta3/groupmsg.proto) - [MsgCloseGroup](#akash.deployment.v1beta3.MsgCloseGroup) - [MsgCloseGroupResponse](#akash.deployment.v1beta3.MsgCloseGroupResponse) @@ -595,6 +705,63 @@ - [akash/market/v1beta2/params.proto](#akash/market/v1beta2/params.proto) - [Params](#akash.market.v1beta2.Params) + - [akash/market/v1beta5/bid.proto](#akash/market/v1beta5/bid.proto) + - [Bid](#akash.market.v1beta5.Bid) + - [BidFilters](#akash.market.v1beta5.BidFilters) + - [BidID](#akash.market.v1beta5.BidID) + - [MsgCloseBid](#akash.market.v1beta5.MsgCloseBid) + - [MsgCloseBidResponse](#akash.market.v1beta5.MsgCloseBidResponse) + - [MsgCreateBid](#akash.market.v1beta5.MsgCreateBid) + - [MsgCreateBidResponse](#akash.market.v1beta5.MsgCreateBidResponse) + - [ResourceOffer](#akash.market.v1beta5.ResourceOffer) + + - [Bid.State](#akash.market.v1beta5.Bid.State) + + - [akash/market/v1beta5/query.proto](#akash/market/v1beta5/query.proto) + - [QueryBidRequest](#akash.market.v1beta5.QueryBidRequest) + - [QueryBidResponse](#akash.market.v1beta5.QueryBidResponse) + - [QueryBidsRequest](#akash.market.v1beta5.QueryBidsRequest) + - [QueryBidsResponse](#akash.market.v1beta5.QueryBidsResponse) + - [QueryLeaseRequest](#akash.market.v1beta5.QueryLeaseRequest) + - [QueryLeaseResponse](#akash.market.v1beta5.QueryLeaseResponse) + - [QueryLeasesRequest](#akash.market.v1beta5.QueryLeasesRequest) + - [QueryLeasesResponse](#akash.market.v1beta5.QueryLeasesResponse) + - [QueryOrderRequest](#akash.market.v1beta5.QueryOrderRequest) + - 
[QueryOrderResponse](#akash.market.v1beta5.QueryOrderResponse) + - [QueryOrdersRequest](#akash.market.v1beta5.QueryOrdersRequest) + - [QueryOrdersResponse](#akash.market.v1beta5.QueryOrdersResponse) + + - [Query](#akash.market.v1beta5.Query) + + - [akash/market/v1beta5/service.proto](#akash/market/v1beta5/service.proto) + - [Msg](#akash.market.v1beta5.Msg) + + - [akash/market/v1beta5/lease.proto](#akash/market/v1beta5/lease.proto) + - [Lease](#akash.market.v1beta5.Lease) + - [LeaseFilters](#akash.market.v1beta5.LeaseFilters) + - [LeaseID](#akash.market.v1beta5.LeaseID) + - [MsgCloseLease](#akash.market.v1beta5.MsgCloseLease) + - [MsgCloseLeaseResponse](#akash.market.v1beta5.MsgCloseLeaseResponse) + - [MsgCreateLease](#akash.market.v1beta5.MsgCreateLease) + - [MsgCreateLeaseResponse](#akash.market.v1beta5.MsgCreateLeaseResponse) + - [MsgWithdrawLease](#akash.market.v1beta5.MsgWithdrawLease) + - [MsgWithdrawLeaseResponse](#akash.market.v1beta5.MsgWithdrawLeaseResponse) + + - [Lease.State](#akash.market.v1beta5.Lease.State) + + - [akash/market/v1beta5/genesis.proto](#akash/market/v1beta5/genesis.proto) + - [GenesisState](#akash.market.v1beta5.GenesisState) + + - [akash/market/v1beta5/order.proto](#akash/market/v1beta5/order.proto) + - [Order](#akash.market.v1beta5.Order) + - [OrderFilters](#akash.market.v1beta5.OrderFilters) + - [OrderID](#akash.market.v1beta5.OrderID) + + - [Order.State](#akash.market.v1beta5.Order.State) + + - [akash/market/v1beta5/params.proto](#akash/market/v1beta5/params.proto) + - [Params](#akash.market.v1beta5.Params) + - [akash/inflation/v1beta3/genesis.proto](#akash/inflation/v1beta3/genesis.proto) - [GenesisState](#akash.inflation.v1beta3.GenesisState) @@ -747,6 +914,268 @@ + +

Top

+ + ## akash/provider/v1beta4/query.proto + + + + + + ### QueryProviderRequest + QueryProviderRequest is request type for the Query/Provider RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + + + + + + + + + ### QueryProviderResponse + QueryProviderResponse is response type for the Query/Provider RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `provider` | [Provider](#akash.provider.v1beta4.Provider) | | | + + + + + + + + + ### QueryProvidersRequest + QueryProvidersRequest is request type for the Query/Providers RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryProvidersResponse + QueryProvidersResponse is response type for the Query/Providers RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `providers` | [Provider](#akash.provider.v1beta4.Provider) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + + + + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Providers` | [QueryProvidersRequest](#akash.provider.v1beta4.QueryProvidersRequest) | [QueryProvidersResponse](#akash.provider.v1beta4.QueryProvidersResponse) | Providers queries providers | GET|/akash/provider/v1beta4/providers| + | `Provider` | [QueryProviderRequest](#akash.provider.v1beta4.QueryProviderRequest) | [QueryProviderResponse](#akash.provider.v1beta4.QueryProviderResponse) | Provider queries provider details | GET|/akash/provider/v1beta4/providers/{owner}| + + + + + + +

Top

+ + ## akash/provider/v1beta4/genesis.proto + + + + + + ### GenesisState + GenesisState defines the basic genesis state used by provider module + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `providers` | [Provider](#akash.provider.v1beta4.Provider) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/provider/v1beta4/provider.proto + + + + + + ### MsgCreateProvider + MsgCreateProvider defines an SDK message for creating a provider + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `host_uri` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | + | `info` | [ProviderInfo](#akash.provider.v1beta4.ProviderInfo) | | | + + + + + + + + + ### MsgCreateProviderResponse + MsgCreateProviderResponse defines the Msg/CreateProvider response type. + + + + + + + + + ### MsgDeleteProvider + MsgDeleteProvider defines an SDK message for deleting a provider + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + + + + + + + + + ### MsgDeleteProviderResponse + MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. + + + + + + + + + ### MsgUpdateProvider + MsgUpdateProvider defines an SDK message for updating a provider + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `host_uri` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | + | `info` | [ProviderInfo](#akash.provider.v1beta4.ProviderInfo) | | | + + + + + + + + + ### MsgUpdateProviderResponse + MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. 
+ + + + + + + + + ### Provider + Provider stores owner and host details + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `host_uri` | [string](#string) | | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | + | `info` | [ProviderInfo](#akash.provider.v1beta4.ProviderInfo) | | | + + + + + + + + + ### ProviderInfo + ProviderInfo + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `email` | [string](#string) | | | + | `website` | [string](#string) | | | + + + + + + + + + + + + + + + ### Msg + Msg defines the provider Msg service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateProvider` | [MsgCreateProvider](#akash.provider.v1beta4.MsgCreateProvider) | [MsgCreateProviderResponse](#akash.provider.v1beta4.MsgCreateProviderResponse) | CreateProvider defines a method that creates a provider given the proper inputs | | + | `UpdateProvider` | [MsgUpdateProvider](#akash.provider.v1beta4.MsgUpdateProvider) | [MsgUpdateProviderResponse](#akash.provider.v1beta4.MsgUpdateProviderResponse) | UpdateProvider defines a method that updates a provider given the proper inputs | | + | `DeleteProvider` | [MsgDeleteProvider](#akash.provider.v1beta4.MsgDeleteProvider) | [MsgDeleteProviderResponse](#akash.provider.v1beta4.MsgDeleteProviderResponse) | DeleteProvider defines a method that deletes a provider given the proper inputs | | + + + + +

Top

@@ -1414,14 +1843,14 @@ - +

Top

- ## akash/audit/v1beta3/audit.proto + ## akash/audit/v1beta4/audit.proto - + ### AttributesFilters AttributesFilters defines filters used to filter deployments @@ -1437,7 +1866,7 @@ - + ### AttributesResponse AttributesResponse represents details of deployment along with group details @@ -1445,14 +1874,14 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta3.AuditedAttributes) | repeated | | + | `attributes` | [AuditedAttributes](#akash.audit.v1beta4.AuditedAttributes) | repeated | | - + ### AuditedAttributes Attributes @@ -1462,14 +1891,14 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | - + ### MsgDeleteProviderAttributes MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes @@ -1486,7 +1915,7 @@ - + ### MsgDeleteProviderAttributesResponse MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. @@ -1496,7 +1925,7 @@ - + ### MsgSignProviderAttributes MsgSignProviderAttributes defines an SDK message for signing a provider attributes @@ -1506,14 +1935,14 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | - + ### MsgSignProviderAttributesResponse MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
@@ -1523,7 +1952,7 @@ - + ### Provider Provider stores owner auditor and attributes details @@ -1533,7 +1962,7 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | + | `attributes` | [akash.base.attributes.v1.Attribute](#akash.base.attributes.v1.Attribute) | repeated | | @@ -1546,28 +1975,28 @@ - + ### Msg Msg defines the provider Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta3.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta3.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | - | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta3.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta3.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | + | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta4.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta4.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | + | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta4.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta4.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | - +

Top

- ## akash/audit/v1beta3/query.proto + ## akash/audit/v1beta4/query.proto - + ### QueryAllProvidersAttributesRequest QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method @@ -1582,7 +2011,7 @@ - + ### QueryAuditorAttributesRequest QueryAuditorAttributesRequest is request type for the Query/Providers RPC method @@ -1598,7 +2027,7 @@ - + ### QueryProviderAttributesRequest QueryProviderAttributesRequest is request type for the Query/Provider RPC method @@ -1614,7 +2043,7 @@ - + ### QueryProviderAuditorRequest QueryProviderAuditorRequest is request type for the Query/Providers RPC method @@ -1630,7 +2059,7 @@ - + ### QueryProviderRequest QueryProviderRequest is request type for the Query/Provider RPC method @@ -1646,7 +2075,7 @@ - + ### QueryProvidersResponse QueryProvidersResponse is response type for the Query/Providers RPC method @@ -1654,7 +2083,7 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.audit.v1beta3.Provider) | repeated | | + | `providers` | [Provider](#akash.audit.v1beta4.Provider) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -1668,30 +2097,30 @@ - + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1beta3.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/list| - | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1beta3.QueryProviderAttributesRequest) | 
[QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/{owner}/list| - | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1beta3.QueryProviderAuditorRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/{auditor}/{owner}| - | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1beta3.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1beta3/auditor/{auditor}/list| + | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1beta4.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta4.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta4/audit/attributes/list| + | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1beta4.QueryProviderAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta4.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta4/audit/attributes/{owner}/list| + | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1beta4.QueryProviderAuditorRequest) | 
[QueryProvidersResponse](#akash.audit.v1beta4.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta4/audit/attributes/{auditor}/{owner}| + | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1beta4.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta4.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1beta4/auditor/{auditor}/list| - +

Top

- ## akash/audit/v1beta3/genesis.proto + ## akash/audit/v1beta4/genesis.proto - + ### GenesisState GenesisState defines the basic genesis state used by audit module @@ -1699,7 +2128,7 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta3.AuditedAttributes) | repeated | | + | `attributes` | [AuditedAttributes](#akash.audit.v1beta4.AuditedAttributes) | repeated | | @@ -1715,14 +2144,14 @@ - +

Top

- ## akash/audit/v1beta2/audit.proto + ## akash/audit/v1beta3/audit.proto - + ### AttributesFilters AttributesFilters defines filters used to filter deployments @@ -1738,7 +2167,7 @@ - + ### AttributesResponse AttributesResponse represents details of deployment along with group details @@ -1746,14 +2175,14 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta2.AuditedAttributes) | repeated | | + | `attributes` | [AuditedAttributes](#akash.audit.v1beta3.AuditedAttributes) | repeated | | - + ### AuditedAttributes Attributes @@ -1763,14 +2192,14 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | + | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | - + ### MsgDeleteProviderAttributes MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes @@ -1787,7 +2216,7 @@ - + ### MsgDeleteProviderAttributesResponse MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. @@ -1797,7 +2226,7 @@ - + ### MsgSignProviderAttributes MsgSignProviderAttributes defines an SDK message for signing a provider attributes @@ -1807,14 +2236,14 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | + | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | - + ### MsgSignProviderAttributesResponse MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
@@ -1824,7 +2253,7 @@ - + ### Provider Provider stores owner auditor and attributes details @@ -1834,7 +2263,7 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | + | `attributes` | [akash.base.v1beta3.Attribute](#akash.base.v1beta3.Attribute) | repeated | | @@ -1847,28 +2276,28 @@ - + ### Msg Msg defines the provider Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta2.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta2.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | - | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta2.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta2.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | + | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta3.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta3.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | + | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta3.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta3.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | - +

Top

- ## akash/audit/v1beta2/query.proto + ## akash/audit/v1beta3/query.proto - + ### QueryAllProvidersAttributesRequest QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method @@ -1883,7 +2312,7 @@ - + ### QueryAuditorAttributesRequest QueryAuditorAttributesRequest is request type for the Query/Providers RPC method @@ -1899,7 +2328,7 @@ - + ### QueryProviderAttributesRequest QueryProviderAttributesRequest is request type for the Query/Provider RPC method @@ -1915,7 +2344,7 @@ - + ### QueryProviderAuditorRequest QueryProviderAuditorRequest is request type for the Query/Providers RPC method @@ -1931,7 +2360,7 @@ - + ### QueryProviderRequest QueryProviderRequest is request type for the Query/Provider RPC method @@ -1947,7 +2376,7 @@ - + ### QueryProvidersResponse QueryProvidersResponse is response type for the Query/Providers RPC method @@ -1955,7 +2384,7 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `providers` | [Provider](#akash.audit.v1beta2.Provider) | repeated | | + | `providers` | [Provider](#akash.audit.v1beta3.Provider) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -1969,30 +2398,30 @@ - + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1beta2.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/list| - | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1beta2.QueryProviderAttributesRequest) | 
[QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/{owner}/list| - | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1beta2.QueryProviderAuditorRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/{auditor}/{owner}| - | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1beta2.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1beta2/auditor/{auditor}/list| + | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1beta3.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/list| + | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1beta3.QueryProviderAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/{owner}/list| + | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1beta3.QueryProviderAuditorRequest) | 
[QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta3/audit/attributes/{auditor}/{owner}| + | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1beta3.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta3.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1beta3/auditor/{auditor}/list| - +

Top

- ## akash/audit/v1beta2/genesis.proto + ## akash/audit/v1beta3/genesis.proto - + ### GenesisState GenesisState defines the basic genesis state used by audit module @@ -2000,7 +2429,7 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta2.AuditedAttributes) | repeated | | + | `attributes` | [AuditedAttributes](#akash.audit.v1beta3.AuditedAttributes) | repeated | | @@ -2016,14 +2445,14 @@ - +

Top

- ## akash/audit/v1beta1/audit.proto + ## akash/audit/v1beta2/audit.proto - + ### AttributesFilters AttributesFilters defines filters used to filter deployments @@ -2039,7 +2468,7 @@ - + ### AttributesResponse AttributesResponse represents details of deployment along with group details @@ -2047,14 +2476,14 @@ | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `attributes` | [AuditedAttributes](#akash.audit.v1beta1.AuditedAttributes) | repeated | | + | `attributes` | [AuditedAttributes](#akash.audit.v1beta2.AuditedAttributes) | repeated | | - + ### AuditedAttributes Attributes @@ -2064,14 +2493,14 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | + | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | - + ### MsgDeleteProviderAttributes MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes @@ -2088,7 +2517,7 @@ - + ### MsgDeleteProviderAttributesResponse MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. @@ -2098,7 +2527,7 @@ - + ### MsgSignProviderAttributes MsgSignProviderAttributes defines an SDK message for signing a provider attributes @@ -2108,14 +2537,14 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | + | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | - + ### MsgSignProviderAttributesResponse MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
@@ -2125,7 +2554,7 @@ - + ### Provider Provider stores owner auditor and attributes details @@ -2135,7 +2564,7 @@ | ----- | ---- | ----- | ----------- | | `owner` | [string](#string) | | | | `auditor` | [string](#string) | | | - | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | + | `attributes` | [akash.base.v1beta2.Attribute](#akash.base.v1beta2.Attribute) | repeated | | @@ -2148,109 +2577,116 @@ - + ### Msg Msg defines the provider Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta1.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta1.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | - | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta1.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta1.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | + | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta2.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta2.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | + | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta2.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta2.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | - +

Top

- ## akash/take/v1beta3/query.proto + ## akash/audit/v1beta2/query.proto - - - + + - + ### QueryAllProvidersAttributesRequest + QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + - ### Query - Query defines the gRPC querier service + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - + + + ### QueryAuditorAttributesRequest + QueryAuditorAttributesRequest is request type for the Query/Providers RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `auditor` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + - -

Top

- ## akash/take/v1beta3/genesis.proto - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### QueryProviderAttributesRequest + QueryProviderAttributesRequest is request type for the Query/Provider RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.take.v1beta3.Params) | | | + | `owner` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - - - - - + + - + ### QueryProviderAuditorRequest + QueryProviderAuditorRequest is request type for the Query/Providers RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `auditor` | [string](#string) | | | + | `owner` | [string](#string) | | | + - -

Top

- ## akash/take/v1beta3/params.proto - + - ### DenomTakeRate - DenomTakeRate describes take rate for specified denom + ### QueryProviderRequest + QueryProviderRequest is request type for the Query/Provider RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `denom` | [string](#string) | | | - | `rate` | [uint32](#uint32) | | | + | `auditor` | [string](#string) | | | + | `owner` | [string](#string) | | | - + - ### Params - Params defines the parameters for the x/take package + ### QueryProvidersResponse + QueryProvidersResponse is response type for the Query/Providers RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `denom_take_rates` | [DenomTakeRate](#akash.take.v1beta3.DenomTakeRate) | repeated | denom -> % take rate | - | `default_take_rate` | [uint32](#uint32) | | | + | `providers` | [Provider](#akash.audit.v1beta2.Provider) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -2262,153 +2698,174 @@ + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `AllProvidersAttributes` | [QueryAllProvidersAttributesRequest](#akash.audit.v1beta2.QueryAllProvidersAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | AllProvidersAttributes queries all providers buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/list| + | `ProviderAttributes` | [QueryProviderAttributesRequest](#akash.audit.v1beta2.QueryProviderAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | ProviderAttributes queries all provider signed attributes buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore 
RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/{owner}/list| + | `ProviderAuditorAttributes` | [QueryProviderAuditorRequest](#akash.audit.v1beta2.QueryProviderAuditorRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | ProviderAuditorAttributes queries provider signed attributes by specific auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/audit/v1beta2/audit/attributes/{auditor}/{owner}| + | `AuditorAttributes` | [QueryAuditorAttributesRequest](#akash.audit.v1beta2.QueryAuditorAttributesRequest) | [QueryProvidersResponse](#akash.audit.v1beta2.QueryProvidersResponse) | AuditorAttributes queries all providers signed by this auditor buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME | GET|/akash/provider/v1beta2/auditor/{auditor}/list| + - +

Top

- ## akash/deployment/v1beta3/groupmsg.proto + ## akash/audit/v1beta2/genesis.proto - + - ### MsgCloseGroup - MsgCloseGroup defines SDK message to close a single Group within a Deployment. + ### GenesisState + GenesisState defines the basic genesis state used by audit module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `attributes` | [AuditedAttributes](#akash.audit.v1beta2.AuditedAttributes) | repeated | | - - + - ### MsgCloseGroupResponse - MsgCloseGroupResponse defines the Msg/CloseGroup response type. + - + - + - - - ### MsgPauseGroup - MsgPauseGroup defines SDK message to close a single Group within a Deployment. + + +

Top

+ + ## akash/audit/v1beta1/audit.proto + + + + + + ### AttributesFilters + AttributesFilters defines filters used to filter deployments | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `auditors` | [string](#string) | repeated | | + | `owners` | [string](#string) | repeated | | - + - ### MsgPauseGroupResponse - MsgPauseGroupResponse defines the Msg/PauseGroup response type. + ### AttributesResponse + AttributesResponse represents details of deployment along with group details + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `attributes` | [AuditedAttributes](#akash.audit.v1beta1.AuditedAttributes) | repeated | | + + - + - ### MsgStartGroup - MsgStartGroup defines SDK message to close a single Group within a Deployment. + ### AuditedAttributes + Attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | + | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | - + - ### MsgStartGroupResponse - MsgStartGroupResponse defines the Msg/StartGroup response type. + ### MsgDeleteProviderAttributes + MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | + | `keys` | [string](#string) | repeated | | + - - - + - + + - + ### MsgDeleteProviderAttributesResponse + MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. - - -

Top

- ## akash/deployment/v1beta3/resourceunit.proto - + - ### ResourceUnit - ResourceUnit extends Resources and adds Count along with the Price + ### MsgSignProviderAttributes + MsgSignProviderAttributes defines an SDK message for signing a provider attributes | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `resource` | [akash.base.v1beta3.Resources](#akash.base.v1beta3.Resources) | | | - | `count` | [uint32](#uint32) | | | - | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | + | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | - - - - - + + - + ### MsgSignProviderAttributesResponse + MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. - - -

Top

- ## akash/deployment/v1beta3/group.proto - + - ### Group - Group stores group id, state and specifications of group + ### Provider + Provider stores owner auditor and attributes details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `group_id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | - | `state` | [Group.State](#akash.deployment.v1beta3.Group.State) | | | - | `group_spec` | [GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | + | `owner` | [string](#string) | | | + | `auditor` | [string](#string) | | | + | `attributes` | [akash.base.v1beta1.Attribute](#akash.base.v1beta1.Attribute) | repeated | | @@ -2416,47 +2873,66 @@ + + + + - + - ### Group.State - State is an enum which refers to state of group + ### Msg + Msg defines the provider Msg service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `SignProviderAttributes` | [MsgSignProviderAttributes](#akash.audit.v1beta1.MsgSignProviderAttributes) | [MsgSignProviderAttributesResponse](#akash.audit.v1beta1.MsgSignProviderAttributesResponse) | SignProviderAttributes defines a method that signs provider attributes | | + | `DeleteProviderAttributes` | [MsgDeleteProviderAttributes](#akash.audit.v1beta1.MsgDeleteProviderAttributes) | [MsgDeleteProviderAttributesResponse](#akash.audit.v1beta1.MsgDeleteProviderAttributesResponse) | DeleteProviderAttributes defines a method that deletes provider attributes | | + + - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. 
So declaring dummy state | - | open | 1 | GroupOpen denotes state for group open | - | paused | 2 | GroupOrdered denotes state for group ordered | - | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | - | closed | 4 | GroupClosed denotes state for group closed | + + +

Top

+ + ## akash/take/v1beta3/query.proto + + + + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + - +

Top

- ## akash/deployment/v1beta3/groupid.proto + ## akash/take/v1beta3/genesis.proto - + - ### GroupID - GroupID stores owner, deployment sequence number and group sequence number + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | + | `params` | [Params](#akash.take.v1beta3.Params) | | | @@ -2472,58 +2948,39 @@ - +

Top

- ## akash/deployment/v1beta3/deployment.proto - - - - - - ### Deployment - Deployment stores deploymentID, state and version details - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment_id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `state` | [Deployment.State](#akash.deployment.v1beta3.Deployment.State) | | | - | `version` | [bytes](#bytes) | | | - | `created_at` | [int64](#int64) | | | - - - + ## akash/take/v1beta3/params.proto - + - ### DeploymentFilters - DeploymentFilters defines filters used to filter deployments + ### DenomTakeRate + DenomTakeRate describes take rate for specified denom | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `state` | [string](#string) | | | + | `denom` | [string](#string) | | | + | `rate` | [uint32](#uint32) | | | - + - ### DeploymentID - DeploymentID stores owner and sequence number + ### Params + Params defines the parameters for the x/take package | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | + | `denom_take_rates` | [DenomTakeRate](#akash.take.v1beta3.DenomTakeRate) | repeated | denom -> % take rate | + | `default_take_rate` | [uint32](#uint32) | | | @@ -2531,19 +2988,6 @@ - - - - ### Deployment.State - State is an enum which refers to state of deployment - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | active | 1 | DeploymentActive denotes state for deployment active | - | closed | 2 | DeploymentClosed denotes state for deployment closed | - - @@ -2552,103 +2996,84 @@ - +

Top

- ## akash/deployment/v1beta3/query.proto + ## akash/deployment/v1beta4/groupmsg.proto - + - ### QueryDeploymentRequest - QueryDeploymentRequest is request type for the Query/Deployment RPC method + ### MsgCloseGroup + MsgCloseGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `id` | [GroupID](#akash.deployment.v1beta4.GroupID) | | | - + - ### QueryDeploymentResponse - QueryDeploymentResponse is response type for the Query/Deployment RPC method + ### MsgCloseGroupResponse + MsgCloseGroupResponse defines the Msg/CloseGroup response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta3.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta3.Group) | repeated | | - | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | - - - + - ### QueryDeploymentsRequest - QueryDeploymentsRequest is request type for the Query/Deployments RPC method + ### MsgPauseGroup + MsgPauseGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [DeploymentFilters](#akash.deployment.v1beta3.DeploymentFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [GroupID](#akash.deployment.v1beta4.GroupID) | | | - + - ### QueryDeploymentsResponse - QueryDeploymentsResponse is response type for the Query/Deployments RPC method + ### MsgPauseGroupResponse + MsgPauseGroupResponse defines the Msg/PauseGroup response type. 
- | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta3.QueryDeploymentResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - + - ### QueryGroupRequest - QueryGroupRequest is request type for the Query/Group RPC method + ### MsgStartGroup + MsgStartGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `id` | [GroupID](#akash.deployment.v1beta4.GroupID) | | | - + - ### QueryGroupResponse - QueryGroupResponse is response type for the Query/Group RPC method + ### MsgStartGroupResponse + MsgStartGroupResponse defines the Msg/StartGroup response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `group` | [Group](#akash.deployment.v1beta3.Group) | | | - - @@ -2658,131 +3083,111 @@ - - - - ### Query - Query defines the gRPC querier service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta3.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta3.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta3/deployments/list| - | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta3.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta3.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta3/deployments/info| - | `Group` | [QueryGroupRequest](#akash.deployment.v1beta3.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta3.QueryGroupResponse) | Group queries group 
details | GET|/akash/deployment/v1beta3/groups/info| - - +

Top

- ## akash/deployment/v1beta3/deploymentmsg.proto + ## akash/deployment/v1beta4/resourceunit.proto - + - ### MsgCloseDeployment - MsgCloseDeployment defines an SDK message for closing deployment + ### ResourceUnit + ResourceUnit extends Resources and adds Count along with the Price | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `resource` | [akash.base.resources.v1.Resources](#akash.base.resources.v1.Resources) | | | + | `count` | [uint32](#uint32) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - - + - ### MsgCloseDeploymentResponse - MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + + + + + + + +

Top

+ ## akash/deployment/v1beta4/group.proto - + - ### MsgCreateDeployment - MsgCreateDeployment defines an SDK message for creating deployment + ### Group + Group stores group id, state and specifications of group | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta3.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | - | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | - + | `group_id` | [GroupID](#akash.deployment.v1beta4.GroupID) | | | + | `state` | [Group.State](#akash.deployment.v1beta4.Group.State) | | | + | `group_spec` | [GroupSpec](#akash.deployment.v1beta4.GroupSpec) | | | + | `created_at` | [int64](#int64) | | | - - - ### MsgCreateDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + + - + ### Group.State + State is an enum which refers to state of group + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. 
So declaring dummy state | + | open | 1 | GroupOpen denotes state for group open | + | paused | 2 | GroupPaused denotes state for group paused | + | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | + | closed | 4 | GroupClosed denotes state for group closed | - - ### MsgDepositDeployment - MsgDepositDeployment deposits more funds into the deposit account + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | - - + - + - - - ### MsgDepositDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - 

Top

+ ## akash/deployment/v1beta4/groupid.proto - + - ### MsgUpdateDeployment - MsgUpdateDeployment defines an SDK message for updating deployment + ### GroupID + GroupID stores owner, deployment sequence number and group sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - | `version` | [bytes](#bytes) | | | - - - - - + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | - - - ### MsgUpdateDeploymentResponse - MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. - @@ -2797,55 +3202,58 @@ - +

Top

- ## akash/deployment/v1beta3/service.proto + ## akash/deployment/v1beta4/deployment.proto - - - + + - + ### Deployment + Deployment stores deploymentID, state and version details - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `deployment_id` | [DeploymentID](#akash.deployment.v1beta4.DeploymentID) | | | + | `state` | [Deployment.State](#akash.deployment.v1beta4.Deployment.State) | | | + | `version` | [bytes](#bytes) | | | + | `created_at` | [int64](#int64) | | | + + - ### Msg - Msg defines the deployment Msg service. + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta3.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta3.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | - | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta3.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta3.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | - | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta3.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta3.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | - | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta3.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta3.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. 
| | - | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta3.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta3.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | | - | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta3.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta3.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. | | - | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta3.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta3.MsgStartGroupResponse) | StartGroup defines a method to close a group of a deployment given proper inputs. | | - + + ### DeploymentFilters + DeploymentFilters defines filters used to filter deployments + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `state` | [string](#string) | | | - -

Top

- ## akash/deployment/v1beta3/authz.proto - + - ### DepositDeploymentAuthorization - DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -the granter's account for a deployment. + ### DeploymentID + DeploymentID stores owner and sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | @@ -2853,6 +3261,19 @@ the granter's account for a deployment. + + + + ### Deployment.State + State is an enum which refers to state of deployment + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | active | 1 | DeploymentActive denotes state for deployment active | + | closed | 2 | DeploymentClosed denotes state for deployment closed | + + @@ -2861,103 +3282,101 @@ the granter's account for a deployment. - +

Top

- ## akash/deployment/v1beta3/genesis.proto + ## akash/deployment/v1beta4/query.proto - + - ### GenesisDeployment - GenesisDeployment defines the basic genesis state used by deployment module + ### QueryDeploymentRequest + QueryDeploymentRequest is request type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta3.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta3.Group) | repeated | | + | `id` | [DeploymentID](#akash.deployment.v1beta4.DeploymentID) | | | - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### QueryDeploymentResponse + QueryDeploymentResponse is response type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployments` | [GenesisDeployment](#akash.deployment.v1beta3.GenesisDeployment) | repeated | | - | `params` | [Params](#akash.deployment.v1beta3.Params) | | | + | `deployment` | [Deployment](#akash.deployment.v1beta4.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta4.Group) | repeated | | + | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | - - - - - + + - + ### QueryDeploymentsRequest + QueryDeploymentsRequest is request type for the Query/Deployments RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `filters` | [DeploymentFilters](#akash.deployment.v1beta4.DeploymentFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + - -

Top

- ## akash/deployment/v1beta3/groupspec.proto - + - ### GroupSpec - GroupSpec stores group specifications + ### QueryDeploymentsResponse + QueryDeploymentsResponse is response type for the Query/Deployments RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `requirements` | [akash.base.v1beta3.PlacementRequirements](#akash.base.v1beta3.PlacementRequirements) | | | - | `resources` | [ResourceUnit](#akash.deployment.v1beta3.ResourceUnit) | repeated | | + | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta4.QueryDeploymentResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - - - + + - + ### QueryGroupRequest + QueryGroupRequest is request type for the Query/Group RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [GroupID](#akash.deployment.v1beta4.GroupID) | | | + - -

Top

- ## akash/deployment/v1beta3/params.proto - + - ### Params - Params defines the parameters for the x/deployment package + ### QueryGroupResponse + QueryGroupResponse is response type for the Query/Group RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `min_deposits` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | repeated | | + | `group` | [Group](#akash.deployment.v1beta4.Group) | | | @@ -2969,168 +3388,194 @@ the granter's account for a deployment. + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta4.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta4.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta4/deployments/list| + | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta4.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta4.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta4/deployments/info| + | `Group` | [QueryGroupRequest](#akash.deployment.v1beta4.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta4.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta4/groups/info| + - +

Top

- ## akash/deployment/v1beta2/groupmsg.proto + ## akash/deployment/v1beta4/deploymentmsg.proto - + - ### MsgCloseGroup - MsgCloseGroup defines SDK message to close a single Group within a Deployment. + ### MsgCloseDeployment + MsgCloseDeployment defines an SDK message for closing deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta4.DeploymentID) | | | - + - ### MsgCloseGroupResponse - MsgCloseGroupResponse defines the Msg/CloseGroup response type. + ### MsgCloseDeploymentResponse + MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. - + - ### MsgPauseGroup - MsgPauseGroup defines SDK message to close a single Group within a Deployment. + ### MsgCreateDeployment + MsgCreateDeployment defines an SDK message for creating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta4.DeploymentID) | | | + | `groups` | [GroupSpec](#akash.deployment.v1beta4.GroupSpec) | repeated | | + | `version` | [bytes](#bytes) | | | + | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | - + - ### MsgPauseGroupResponse - MsgPauseGroupResponse defines the Msg/PauseGroup response type. + ### MsgCreateDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - + - ### MsgStartGroup - MsgStartGroup defines SDK message to close a single Group within a Deployment. 
+ ### MsgDepositDeployment + MsgDepositDeployment deposits more funds into the deposit account | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta4.DeploymentID) | | | + | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | - + - ### MsgStartGroupResponse - MsgStartGroupResponse defines the Msg/StartGroup response type. + ### MsgDepositDeploymentResponse + MsgDepositDeploymentResponse defines the Msg/DepositDeployment response type. - - - - - + + - + ### MsgUpdateDeployment + MsgUpdateDeployment defines an SDK message for updating deployment + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [DeploymentID](#akash.deployment.v1beta4.DeploymentID) | | | + | `version` | [bytes](#bytes) | | | + - - 

Top

- ## akash/deployment/v1beta2/group.proto - + - ### Group - Group stores group id, state and specifications of group + ### MsgUpdateDeploymentResponse + MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `group_id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | - | `state` | [Group.State](#akash.deployment.v1beta2.Group.State) | | | - | `group_spec` | [GroupSpec](#akash.deployment.v1beta2.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | - - - - + - ### Group.State - State is an enum which refers to state of group + - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | GroupOpen denotes state for group open | - | paused | 2 | GroupOrdered denotes state for group ordered | - | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | - | closed | 4 | GroupClosed denotes state for group closed | + + + + + +

Top

+ + ## akash/deployment/v1beta4/service.proto + + + + + + ### Msg + Msg defines the deployment Msg service. + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta4.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta4.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | + | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta4.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta4.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | + | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta4.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta4.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | + | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta4.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta4.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | | + | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta4.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta4.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | | + | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta4.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta4.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. 
| | + | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta4.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta4.MsgStartGroupResponse) | StartGroup defines a method to start a group of a deployment given proper inputs. | | + 

Top

- ## akash/deployment/v1beta2/groupid.proto + ## akash/deployment/v1beta4/authz.proto - + - ### GroupID - GroupID stores owner, deployment sequence number and group sequence number + ### DepositDeploymentAuthorization + DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +the granter's account for a deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | + | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | @@ -3146,58 +3591,72 @@ the granter's account for a deployment. - +

Top

- ## akash/deployment/v1beta2/deployment.proto + ## akash/deployment/v1beta4/genesis.proto - + - ### Deployment - Deployment stores deploymentID, state and version details + ### GenesisDeployment + GenesisDeployment defines the basic genesis state used by deployment module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment_id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `state` | [Deployment.State](#akash.deployment.v1beta2.Deployment.State) | | | - | `version` | [bytes](#bytes) | | | - | `created_at` | [int64](#int64) | | | + | `deployment` | [Deployment](#akash.deployment.v1beta4.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta4.Group) | repeated | | - + - ### DeploymentFilters - DeploymentFilters defines filters used to filter deployments + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `state` | [string](#string) | | | + | `deployments` | [GenesisDeployment](#akash.deployment.v1beta4.GenesisDeployment) | repeated | | + | `params` | [Params](#akash.deployment.v1beta4.Params) | | | + + + + + + + + + + + + + + +

Top

+ ## akash/deployment/v1beta4/groupspec.proto - + - ### DeploymentID - DeploymentID stores owner and sequence number + ### GroupSpec + GroupSpec stores group specifications | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | + | `name` | [string](#string) | | | + | `requirements` | [akash.base.attributes.v1.PlacementRequirements](#akash.base.attributes.v1.PlacementRequirements) | | | + | `resources` | [ResourceUnit](#akash.deployment.v1beta4.ResourceUnit) | repeated | | @@ -3205,18 +3664,36 @@ the granter's account for a deployment. + + + + + + - + + +

Top

- ### Deployment.State - State is an enum which refers to state of deployment + ## akash/deployment/v1beta4/params.proto + - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | active | 1 | DeploymentActive denotes state for deployment active | - | closed | 2 | DeploymentClosed denotes state for deployment closed | + + + ### Params + Params defines the parameters for the x/deployment package + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `min_deposits` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | repeated | | + + + + + + @@ -3226,103 +3703,84 @@ the granter's account for a deployment. - +

Top

- ## akash/deployment/v1beta2/query.proto + ## akash/deployment/v1beta3/groupmsg.proto - + - ### QueryDeploymentRequest - QueryDeploymentRequest is request type for the Query/Deployment RPC method + ### MsgCloseGroup + MsgCloseGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | - + - ### QueryDeploymentResponse - QueryDeploymentResponse is response type for the Query/Deployment RPC method + ### MsgCloseGroupResponse + MsgCloseGroupResponse defines the Msg/CloseGroup response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta2.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta2.Group) | repeated | | - | `escrow_account` | [akash.escrow.v1beta2.Account](#akash.escrow.v1beta2.Account) | | | - - - + - ### QueryDeploymentsRequest - QueryDeploymentsRequest is request type for the Query/Deployments RPC method + ### MsgPauseGroup + MsgPauseGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [DeploymentFilters](#akash.deployment.v1beta2.DeploymentFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | - + - ### QueryDeploymentsResponse - QueryDeploymentsResponse is response type for the Query/Deployments RPC method + ### MsgPauseGroupResponse + MsgPauseGroupResponse defines the Msg/PauseGroup response type. 
- | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta2.QueryDeploymentResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - + - ### QueryGroupRequest - QueryGroupRequest is request type for the Query/Group RPC method + ### MsgStartGroup + MsgStartGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | - + - ### QueryGroupResponse - QueryGroupResponse is response type for the Query/Group RPC method + ### MsgStartGroupResponse + MsgStartGroupResponse defines the Msg/StartGroup response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `group` | [Group](#akash.deployment.v1beta2.Group) | | | - - @@ -3332,131 +3790,111 @@ the granter's account for a deployment. 
- - - - ### Query - Query defines the gRPC querier service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta2.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta2.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta2/deployments/list| - | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta2.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta2.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta2/deployments/info| - | `Group` | [QueryGroupRequest](#akash.deployment.v1beta2.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta2.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta2/groups/info| - - +

Top

- ## akash/deployment/v1beta2/deploymentmsg.proto + ## akash/deployment/v1beta3/resourceunit.proto - + - ### MsgCloseDeployment - MsgCloseDeployment defines an SDK message for closing deployment + ### ResourceUnit + ResourceUnit extends Resources and adds Count along with the Price | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `resource` | [akash.base.v1beta3.Resources](#akash.base.v1beta3.Resources) | | | + | `count` | [uint32](#uint32) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - - + - ### MsgCloseDeploymentResponse - MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + + + + + + + +

Top

+ ## akash/deployment/v1beta3/group.proto - + - ### MsgCreateDeployment - MsgCreateDeployment defines an SDK message for creating deployment + ### Group + Group stores group id, state and specifications of group | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta2.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | - | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | - + | `group_id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + | `state` | [Group.State](#akash.deployment.v1beta3.Group.State) | | | + | `group_spec` | [GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | | + | `created_at` | [int64](#int64) | | | - - - ### MsgCreateDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + + - + ### Group.State + State is an enum which refers to state of group + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. 
So declaring dummy state | + | open | 1 | GroupOpen denotes state for group open | + | paused | 2 | GroupOrdered denotes state for group ordered | + | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | + | closed | 4 | GroupClosed denotes state for group closed | - - ### MsgDepositDeployment - MsgDepositDeployment deposits more funds into the deposit account + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `depositor` | [string](#string) | | Depositor pays for the deposit | - - + - + - - - ### MsgDepositDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - + +

Top

+ ## akash/deployment/v1beta3/groupid.proto - + - ### MsgUpdateDeployment - MsgUpdateDeployment defines an SDK message for updating deployment + ### GroupID + GroupID stores owner, deployment sequence number and group sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - | `version` | [bytes](#bytes) | | | - - - - - + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | - - - ### MsgUpdateDeploymentResponse - MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. - @@ -3471,55 +3909,58 @@ the granter's account for a deployment. - +

Top

- ## akash/deployment/v1beta2/service.proto + ## akash/deployment/v1beta3/deployment.proto - - - + + - + ### Deployment + Deployment stores deploymentID, state and version details - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `deployment_id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `state` | [Deployment.State](#akash.deployment.v1beta3.Deployment.State) | | | + | `version` | [bytes](#bytes) | | | + | `created_at` | [int64](#int64) | | | + + - ### Msg - Msg defines the deployment Msg service. + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta2.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta2.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | - | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta2.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta2.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | - | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta2.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta2.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | - | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta2.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta2.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. 
| | - | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta2.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta2.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | | - | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta2.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta2.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. | | - | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta2.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta2.MsgStartGroupResponse) | StartGroup defines a method to close a group of a deployment given proper inputs. | | - + + + ### DeploymentFilters + DeploymentFilters defines filters used to filter deployments + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `state` | [string](#string) | | | + - -

Top

- ## akash/deployment/v1beta2/authz.proto - + - ### DepositDeploymentAuthorization - DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -the granter's account for a deployment. + ### DeploymentID + DeploymentID stores owner and sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | @@ -3527,6 +3968,19 @@ the granter's account for a deployment. + + + + ### Deployment.State + State is an enum which refers to state of deployment + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | active | 1 | DeploymentActive denotes state for deployment active | + | closed | 2 | DeploymentClosed denotes state for deployment closed | + + @@ -3535,105 +3989,101 @@ the granter's account for a deployment. - +

Top

- ## akash/deployment/v1beta2/genesis.proto + ## akash/deployment/v1beta3/query.proto - + - ### GenesisDeployment - GenesisDeployment defines the basic genesis state used by deployment module + ### QueryDeploymentRequest + QueryDeploymentRequest is request type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta2.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta2.Group) | repeated | | + | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### QueryDeploymentResponse + QueryDeploymentResponse is response type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployments` | [GenesisDeployment](#akash.deployment.v1beta2.GenesisDeployment) | repeated | | - | `params` | [Params](#akash.deployment.v1beta2.Params) | | | + | `deployment` | [Deployment](#akash.deployment.v1beta3.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta3.Group) | repeated | | + | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | - - - - - + + - + ### QueryDeploymentsRequest + QueryDeploymentsRequest is request type for the Query/Deployments RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `filters` | [DeploymentFilters](#akash.deployment.v1beta3.DeploymentFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + - -

Top

- ## akash/deployment/v1beta2/groupspec.proto - + - ### GroupSpec - GroupSpec stores group specifications + ### QueryDeploymentsResponse + QueryDeploymentsResponse is response type for the Query/Deployments RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `requirements` | [akash.base.v1beta2.PlacementRequirements](#akash.base.v1beta2.PlacementRequirements) | | | - | `resources` | [Resource](#akash.deployment.v1beta2.Resource) | repeated | | + | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta3.QueryDeploymentResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - - - + + - + ### QueryGroupRequest + QueryGroupRequest is request type for the Query/Group RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [GroupID](#akash.deployment.v1beta3.GroupID) | | | + - -

Top

- ## akash/deployment/v1beta2/resource.proto - + - ### Resource - Resource stores unit, total count and price of resource + ### QueryGroupResponse + QueryGroupResponse is response type for the Query/Group RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `resources` | [akash.base.v1beta2.ResourceUnits](#akash.base.v1beta2.ResourceUnits) | | | - | `count` | [uint32](#uint32) | | | - | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `group` | [Group](#akash.deployment.v1beta3.Group) | | | @@ -3645,186 +4095,194 @@ the granter's account for a deployment. - - - - -

Top

+ - ## akash/deployment/v1beta2/params.proto - + ### Query + Query defines the gRPC querier service + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta3.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta3.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta3/deployments/list| + | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta3.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta3.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta3/deployments/info| + | `Group` | [QueryGroupRequest](#akash.deployment.v1beta3.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta3.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta3/groups/info| - - - ### Params - Params defines the parameters for the x/deployment package + - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - + +

Top

+ ## akash/deployment/v1beta3/deploymentmsg.proto - - - - - + + - + ### MsgCloseDeployment + MsgCloseDeployment defines an SDK message for closing deployment + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + - -

Top

- ## akash/deployment/v1beta1/group.proto - + - ### Group - Group stores group id, state and specifications of group + ### MsgCloseDeploymentResponse + MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `group_id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - | `state` | [Group.State](#akash.deployment.v1beta1.Group.State) | | | - | `group_spec` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | | | - | `created_at` | [int64](#int64) | | | - - - + - ### GroupID - GroupID stores owner, deployment sequence number and group sequence number + ### MsgCreateDeployment + MsgCreateDeployment defines an SDK message for creating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `gseq` | [uint32](#uint32) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `groups` | [GroupSpec](#akash.deployment.v1beta3.GroupSpec) | repeated | | + | `version` | [bytes](#bytes) | | | + | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | - + - ### GroupSpec - GroupSpec stores group specifications + ### MsgCreateDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `name` | [string](#string) | | | - | `requirements` | [akash.base.v1beta1.PlacementRequirements](#akash.base.v1beta1.PlacementRequirements) | | | - | `resources` | [Resource](#akash.deployment.v1beta1.Resource) | repeated | | - - - + - ### MsgCloseGroup - MsgCloseGroup defines SDK message to close a single Group within a Deployment. 
+ ### MsgDepositDeployment + MsgDepositDeployment deposits more funds into the deposit account | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | - + - ### MsgCloseGroupResponse - MsgCloseGroupResponse defines the Msg/CloseGroup response type. + ### MsgDepositDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. - + - ### MsgPauseGroup - MsgPauseGroup defines SDK message to close a single Group within a Deployment. + ### MsgUpdateDeployment + MsgUpdateDeployment defines an SDK message for updating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta3.DeploymentID) | | | + | `version` | [bytes](#bytes) | | | - + - ### MsgPauseGroupResponse - MsgPauseGroupResponse defines the Msg/PauseGroup response type. + ### MsgUpdateDeploymentResponse + MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. - - + - ### MsgStartGroup - MsgStartGroup defines SDK message to close a single Group within a Deployment. + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | + +

Top

+ ## akash/deployment/v1beta3/service.proto + + + + + + - + - ### MsgStartGroupResponse - MsgStartGroupResponse defines the Msg/StartGroup response type. + ### Msg + Msg defines the deployment Msg service. + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta3.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta3.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | + | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta3.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta3.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | + | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta3.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta3.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | + | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta3.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta3.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | | + | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta3.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta3.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | | + | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta3.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta3.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. 
| | + | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta3.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta3.MsgStartGroupResponse) | StartGroup defines a method to start a group of a deployment given proper inputs. | | + + + + 

Top

+ ## akash/deployment/v1beta3/authz.proto - + - ### Resource - Resource stores unit, total count and price of resource + ### DepositDeploymentAuthorization + DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +the granter's account for a deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `resources` | [akash.base.v1beta1.ResourceUnits](#akash.base.v1beta1.ResourceUnits) | | | - | `count` | [uint32](#uint32) | | | - | `price` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | @@ -3832,21 +4290,6 @@ the granter's account for a deployment. - - - - ### Group.State - State is an enum which refers to state of group - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | open | 1 | GroupOpen denotes state for group open | - | paused | 2 | GroupOrdered denotes state for group ordered | - | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | - | closed | 4 | GroupClosed denotes state for group closed | - - @@ -3855,347 +4298,343 @@ the granter's account for a deployment. - +

Top

- ## akash/deployment/v1beta1/deployment.proto + ## akash/deployment/v1beta3/genesis.proto - + - ### Deployment - Deployment stores deploymentID, state and version details + ### GenesisDeployment + GenesisDeployment defines the basic genesis state used by deployment module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment_id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `state` | [Deployment.State](#akash.deployment.v1beta1.Deployment.State) | | | - | `version` | [bytes](#bytes) | | | - | `created_at` | [int64](#int64) | | | + | `deployment` | [Deployment](#akash.deployment.v1beta3.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta3.Group) | repeated | | - + - ### DeploymentFilters - DeploymentFilters defines filters used to filter deployments + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | - | `state` | [string](#string) | | | + | `deployments` | [GenesisDeployment](#akash.deployment.v1beta3.GenesisDeployment) | repeated | | + | `params` | [Params](#akash.deployment.v1beta3.Params) | | | - - + - ### DeploymentID - DeploymentID stores owner and sequence number + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `dseq` | [uint64](#uint64) | | | + +

Top

+ ## akash/deployment/v1beta3/groupspec.proto - + - ### MsgCloseDeployment - MsgCloseDeployment defines an SDK message for closing deployment + ### GroupSpec + GroupSpec stores group specifications | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | + | `name` | [string](#string) | | | + | `requirements` | [akash.base.v1beta3.PlacementRequirements](#akash.base.v1beta3.PlacementRequirements) | | | + | `resources` | [ResourceUnit](#akash.deployment.v1beta3.ResourceUnit) | repeated | | - - + - ### MsgCloseDeploymentResponse - MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + + + + + + + +

Top

+ ## akash/deployment/v1beta3/params.proto - + - ### MsgCreateDeployment - MsgCreateDeployment defines an SDK message for creating deployment + ### Params + Params defines the parameters for the x/deployment package | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | - | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `min_deposits` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | repeated | | - - + - ### MsgCreateDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + + + + + + + +

Top

+ ## akash/deployment/v1beta2/groupmsg.proto - + - ### MsgDepositDeployment - MsgDepositDeployment deposits more funds into the deposit account + ### MsgCloseGroup + MsgCloseGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | - + - ### MsgDepositDeploymentResponse - MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + ### MsgCloseGroupResponse + MsgCloseGroupResponse defines the Msg/CloseGroup response type. - + - ### MsgUpdateDeployment - MsgUpdateDeployment defines an SDK message for updating deployment + ### MsgPauseGroup + MsgPauseGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - | `groups` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | repeated | | - | `version` | [bytes](#bytes) | | | + | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | - + - ### MsgUpdateDeploymentResponse - MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. + ### MsgPauseGroupResponse + MsgPauseGroupResponse defines the Msg/PauseGroup response type. - + + + + ### MsgStartGroup + MsgStartGroup defines SDK message to close a single Group within a Deployment. - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + + - ### Deployment.State - State is an enum which refers to state of deployment + - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. 
So declaring dummy state | - | active | 1 | DeploymentActive denotes state for deployment active | - | closed | 2 | DeploymentClosed denotes state for deployment closed | + - + ### MsgStartGroupResponse + MsgStartGroupResponse defines the Msg/StartGroup response type. - + - - ### Msg - Msg defines the deployment Msg service. + + + + + - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta1.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta1.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | - | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta1.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta1.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | - | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta1.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta1.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | - | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta1.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta1.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | | - | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta1.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta1.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. 
| | - | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta1.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta1.MsgPauseGroupResponse) | PauseGroup defines a method to close a group of a deployment given proper inputs. | | - | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta1.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta1.MsgStartGroupResponse) | StartGroup defines a method to close a group of a deployment given proper inputs. | | - - +

Top

- ## akash/deployment/v1beta1/query.proto + ## akash/deployment/v1beta2/group.proto - + - ### QueryDeploymentRequest - QueryDeploymentRequest is request type for the Query/Deployment RPC method + ### Group + Group stores group id, state and specifications of group | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | + | `group_id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + | `state` | [Group.State](#akash.deployment.v1beta2.Group.State) | | | + | `group_spec` | [GroupSpec](#akash.deployment.v1beta2.GroupSpec) | | | + | `created_at` | [int64](#int64) | | | + + - + - ### QueryDeploymentResponse - QueryDeploymentResponse is response type for the Query/Deployment RPC method + ### Group.State + State is an enum which refers to state of group - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta1.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta1.Group) | repeated | | - | `escrow_account` | [akash.escrow.v1beta1.Account](#akash.escrow.v1beta1.Account) | | | - + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. 
So declaring dummy state | + | open | 1 | GroupOpen denotes state for group open | + | paused | 2 | GroupPaused denotes state for group paused | + | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | + | closed | 4 | GroupClosed denotes state for group closed | - + - - + - ### QueryDeploymentsRequest - QueryDeploymentsRequest is request type for the Query/Deployments RPC method + - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `filters` | [DeploymentFilters](#akash.deployment.v1beta1.DeploymentFilters) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | - + +

Top

+ ## akash/deployment/v1beta2/groupid.proto - + - ### QueryDeploymentsResponse - QueryDeploymentsResponse is response type for the Query/Deployments RPC method + ### GroupID + GroupID stores owner, deployment sequence number and group sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta1.QueryDeploymentResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | - - + - ### QueryGroupRequest - QueryGroupRequest is request type for the Query/Group RPC method + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | + +

Top

+ ## akash/deployment/v1beta2/deployment.proto - + - ### QueryGroupResponse - QueryGroupResponse is response type for the Query/Group RPC method + ### Deployment + Deployment stores deploymentID, state and version details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `group` | [Group](#akash.deployment.v1beta1.Group) | | | + | `deployment_id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `state` | [Deployment.State](#akash.deployment.v1beta2.Deployment.State) | | | + | `version` | [bytes](#bytes) | | | + | `created_at` | [int64](#int64) | | | - - - - - - - + - ### Query - Query defines the gRPC querier service + ### DeploymentFilters + DeploymentFilters defines filters used to filter deployments - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta1.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta1.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta1/deployments/list| - | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta1.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta1.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta1/deployments/info| - | `Group` | [QueryGroupRequest](#akash.deployment.v1beta1.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta1.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta1/groups/info| - - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `state` | [string](#string) | | | - -

Top

- ## akash/deployment/v1beta1/authz.proto - + - ### DepositDeploymentAuthorization - DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from -the granter's account for a deployment. + ### DeploymentID + DeploymentID stores owner and sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | @@ -4203,6 +4642,19 @@ the granter's account for a deployment. + + + + ### Deployment.State + State is an enum which refers to state of deployment + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | active | 1 | DeploymentActive denotes state for deployment active | + | closed | 2 | DeploymentClosed denotes state for deployment closed | + + @@ -4211,101 +4663,101 @@ the granter's account for a deployment. - +

Top

- ## akash/deployment/v1beta1/genesis.proto + ## akash/deployment/v1beta2/query.proto - + - ### GenesisDeployment - GenesisDeployment defines the basic genesis state used by deployment module + ### QueryDeploymentRequest + QueryDeploymentRequest is request type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment` | [Deployment](#akash.deployment.v1beta1.Deployment) | | | - | `groups` | [Group](#akash.deployment.v1beta1.Group) | repeated | | + | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### QueryDeploymentResponse + QueryDeploymentResponse is response type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployments` | [GenesisDeployment](#akash.deployment.v1beta1.GenesisDeployment) | repeated | | - | `params` | [Params](#akash.deployment.v1beta1.Params) | | | + | `deployment` | [Deployment](#akash.deployment.v1beta2.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta2.Group) | repeated | | + | `escrow_account` | [akash.escrow.v1beta2.Account](#akash.escrow.v1beta2.Account) | | | - - - - - + + - + ### QueryDeploymentsRequest + QueryDeploymentsRequest is request type for the Query/Deployments RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `filters` | [DeploymentFilters](#akash.deployment.v1beta2.DeploymentFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + - -

Top

- ## akash/deployment/v1beta1/params.proto - + - ### Params - Params defines the parameters for the x/deployment package + ### QueryDeploymentsResponse + QueryDeploymentsResponse is response type for the Query/Deployments RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `deployment_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta2.QueryDeploymentResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | - - - - - + + - + ### QueryGroupRequest + QueryGroupRequest is request type for the Query/Group RPC method + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [GroupID](#akash.deployment.v1beta2.GroupID) | | | + - -

Top

- ## akash/staking/v1beta3/genesis.proto - + - ### GenesisState - GenesisState stores slice of genesis deployment instance + ### QueryGroupResponse + QueryGroupResponse is response type for the Query/Group RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.staking.v1beta3.Params) | | | + | `group` | [Group](#akash.deployment.v1beta2.Group) | | | @@ -4317,93 +4769,149 @@ the granter's account for a deployment. + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta2.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta2.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta2/deployments/list| + | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta2.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta2.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta2/deployments/info| + | `Group` | [QueryGroupRequest](#akash.deployment.v1beta2.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta2.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta2/groups/info| + - +

Top

- ## akash/staking/v1beta3/params.proto + ## akash/deployment/v1beta2/deploymentmsg.proto - + - ### Params - Params extends the parameters for the x/staking module + ### MsgCloseDeployment + MsgCloseDeployment defines an SDK message for closing deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `min_commission_rate` | [string](#string) | | min_commission_rate is the chain-wide minimum commission rate that a validator can charge their delegators | + | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | - + + - + ### MsgCloseDeploymentResponse + MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. - + - + + + + ### MsgCreateDeployment + MsgCreateDeployment defines an SDK message for creating deployment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `groups` | [GroupSpec](#akash.deployment.v1beta2.GroupSpec) | repeated | | + | `version` | [bytes](#bytes) | | | + | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | + - -

Top

- ## akash/cert/v1beta3/query.proto - + - ### CertificateResponse - CertificateResponse contains a single X509 certificate and its serial number + ### MsgCreateDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + + + + + + + + + ### MsgDepositDeployment + MsgDepositDeployment deposits more funds into the deposit account | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `certificate` | [Certificate](#akash.cert.v1beta3.Certificate) | | | - | `serial` | [string](#string) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `depositor` | [string](#string) | | Depositor pays for the deposit | - + - ### QueryCertificatesRequest - QueryDeploymentsRequest is request type for the Query/Deployments RPC method + ### MsgDepositDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + + + + + + + + + ### MsgUpdateDeployment + MsgUpdateDeployment defines an SDK message for updating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filter` | [CertificateFilter](#akash.cert.v1beta3.CertificateFilter) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta2.DeploymentID) | | | + | `version` | [bytes](#bytes) | | | - + - ### QueryCertificatesResponse - QueryCertificatesResponse is response type for the Query/Certificates RPC method + ### MsgUpdateDeploymentResponse + MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
- | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `certificates` | [CertificateResponse](#akash.cert.v1beta3.CertificateResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + + + + +

Top

+ ## akash/deployment/v1beta2/service.proto @@ -4413,193 +4921,187 @@ the granter's account for a deployment. - + - ### Query - Query defines the gRPC querier service + ### Msg + Msg defines the deployment Msg service. | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Certificates` | [QueryCertificatesRequest](#akash.cert.v1beta3.QueryCertificatesRequest) | [QueryCertificatesResponse](#akash.cert.v1beta3.QueryCertificatesResponse) | Certificates queries certificates | GET|/akash/cert/v1beta3/certificates/list| + | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta2.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta2.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. | | + | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta2.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta2.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | + | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta2.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta2.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | + | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta2.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta2.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | | + | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta2.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta2.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. 
| | + | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta2.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta2.MsgPauseGroupResponse) | PauseGroup defines a method to pause a group of a deployment given proper inputs. | | + | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta2.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta2.MsgStartGroupResponse) | StartGroup defines a method to start a group of a deployment given proper inputs. | | - +

Top

- ## akash/cert/v1beta3/cert.proto + ## akash/deployment/v1beta2/authz.proto - + - ### Certificate - Certificate stores state, certificate and it's public key + ### DepositDeploymentAuthorization + DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +the granter's account for a deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `state` | [Certificate.State](#akash.cert.v1beta3.Certificate.State) | | | - | `cert` | [bytes](#bytes) | | | - | `pubkey` | [bytes](#bytes) | | | + | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | - - + - ### CertificateFilter - CertificateFilter defines filters used to filter certificates + + + + + - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `serial` | [string](#string) | | | - | `state` | [string](#string) | | | + +

Top

+ ## akash/deployment/v1beta2/genesis.proto - + - ### CertificateID - CertificateID stores owner and sequence number + ### GenesisDeployment + GenesisDeployment defines the basic genesis state used by deployment module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `serial` | [string](#string) | | | + | `deployment` | [Deployment](#akash.deployment.v1beta2.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta2.Group) | repeated | | - + - ### MsgCreateCertificate - MsgCreateCertificate defines an SDK message for creating certificate + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `cert` | [bytes](#bytes) | | | - | `pubkey` | [bytes](#bytes) | | | + | `deployments` | [GenesisDeployment](#akash.deployment.v1beta2.GenesisDeployment) | repeated | | + | `params` | [Params](#akash.deployment.v1beta2.Params) | | | - - + - ### MsgCreateCertificateResponse - MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. + + + + + + + +

Top

+ ## akash/deployment/v1beta2/groupspec.proto - + - ### MsgRevokeCertificate - MsgRevokeCertificate defines an SDK message for revoking certificate + ### GroupSpec + GroupSpec stores group specifications | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [CertificateID](#akash.cert.v1beta3.CertificateID) | | | - - - - - + | `name` | [string](#string) | | | + | `requirements` | [akash.base.v1beta2.PlacementRequirements](#akash.base.v1beta2.PlacementRequirements) | | | + | `resources` | [Resource](#akash.deployment.v1beta2.Resource) | repeated | | - - - ### MsgRevokeCertificateResponse - MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. - - - - - ### Certificate.State - State is an enum which refers to state of deployment - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | valid | 1 | CertificateValid denotes state for deployment active | - | revoked | 2 | CertificateRevoked denotes state for deployment closed | - - - - - - ### Msg - Msg defines the provider Msg service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateCertificate` | [MsgCreateCertificate](#akash.cert.v1beta3.MsgCreateCertificate) | [MsgCreateCertificateResponse](#akash.cert.v1beta3.MsgCreateCertificateResponse) | CreateCertificate defines a method to create new certificate given proper inputs. | | - | `RevokeCertificate` | [MsgRevokeCertificate](#akash.cert.v1beta3.MsgRevokeCertificate) | [MsgRevokeCertificateResponse](#akash.cert.v1beta3.MsgRevokeCertificateResponse) | RevokeCertificate defines a method to revoke the certificate | | - - +

Top

- ## akash/cert/v1beta3/genesis.proto + ## akash/deployment/v1beta2/resource.proto - + - ### GenesisCertificate - GenesisCertificate defines certificate entry at genesis + ### Resource + Resource stores unit, total count and price of resource | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `certificate` | [Certificate](#akash.cert.v1beta3.Certificate) | | | + | `resources` | [akash.base.v1beta2.ResourceUnits](#akash.base.v1beta2.ResourceUnits) | | | + | `count` | [uint32](#uint32) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + + + + + + + + + + + + + + +

Top

+ ## akash/deployment/v1beta2/params.proto - + - ### GenesisState - GenesisState defines the basic genesis state used by cert module + ### Params + Params defines the parameters for the x/deployment package | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `certificates` | [GenesisCertificate](#akash.cert.v1beta3.GenesisCertificate) | repeated | | + | `deployment_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | @@ -4615,184 +5117,152 @@ the granter's account for a deployment. - +

Top

- ## akash/cert/v1beta2/query.proto + ## akash/deployment/v1beta1/group.proto - + - ### CertificateResponse - CertificateResponse contains a single X509 certificate and its serial number + ### Group + Group stores group id, state and specifications of group | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `certificate` | [Certificate](#akash.cert.v1beta2.Certificate) | | | - | `serial` | [string](#string) | | | + | `group_id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | + | `state` | [Group.State](#akash.deployment.v1beta1.Group.State) | | | + | `group_spec` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | | | + | `created_at` | [int64](#int64) | | | - + - ### QueryCertificatesRequest - QueryDeploymentsRequest is request type for the Query/Deployments RPC method + ### GroupID + GroupID stores owner, deployment sequence number and group sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filter` | [CertificateFilter](#akash.cert.v1beta2.CertificateFilter) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | - + - ### QueryCertificatesResponse - QueryCertificatesResponse is response type for the Query/Certificates RPC method + ### GroupSpec + GroupSpec stores group specifications | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `certificates` | [CertificateResponse](#akash.cert.v1beta2.CertificateResponse) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `name` | [string](#string) | | | + | `requirements` | [akash.base.v1beta1.PlacementRequirements](#akash.base.v1beta1.PlacementRequirements) | | | + | `resources` | [Resource](#akash.deployment.v1beta1.Resource) | repeated | | - - - - - - - + - ### 
Query - Query defines the gRPC querier service + ### MsgCloseGroup + MsgCloseGroup defines SDK message to close a single Group within a Deployment. - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Certificates` | [QueryCertificatesRequest](#akash.cert.v1beta2.QueryCertificatesRequest) | [QueryCertificatesResponse](#akash.cert.v1beta2.QueryCertificatesResponse) | Certificates queries certificates | GET|/akash/cert/v1beta3/certificates/list| - - + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - -

Top

- ## akash/cert/v1beta2/cert.proto - + - ### Certificate - Certificate stores state, certificate and it's public key + ### MsgCloseGroupResponse + MsgCloseGroupResponse defines the Msg/CloseGroup response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `state` | [Certificate.State](#akash.cert.v1beta2.Certificate.State) | | | - | `cert` | [bytes](#bytes) | | | - | `pubkey` | [bytes](#bytes) | | | - - - + - ### CertificateFilter - CertificateFilter defines filters used to filter certificates + ### MsgPauseGroup + MsgPauseGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `serial` | [string](#string) | | | - | `state` | [string](#string) | | | + | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - + - ### CertificateID - CertificateID stores owner and sequence number + ### MsgPauseGroupResponse + MsgPauseGroupResponse defines the Msg/PauseGroup response type. - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `serial` | [string](#string) | | | - - - + - ### MsgCreateCertificate - MsgCreateCertificate defines an SDK message for creating certificate + ### MsgStartGroup + MsgStartGroup defines SDK message to close a single Group within a Deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `cert` | [bytes](#bytes) | | | - | `pubkey` | [bytes](#bytes) | | | + | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | - + - ### MsgCreateCertificateResponse - MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. + ### MsgStartGroupResponse + MsgStartGroupResponse defines the Msg/StartGroup response type. 
- + - ### MsgRevokeCertificate - MsgRevokeCertificate defines an SDK message for revoking certificate + ### Resource + Resource stores unit, total count and price of resource | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [CertificateID](#akash.cert.v1beta2.CertificateID) | | | - - - - - + | `resources` | [akash.base.v1beta1.ResourceUnits](#akash.base.v1beta1.ResourceUnits) | | | + | `count` | [uint32](#uint32) | | | + | `price` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - - - ### MsgRevokeCertificateResponse - MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. - @@ -4800,242 +5270,278 @@ the granter's account for a deployment. - + - ### Certificate.State - State is an enum which refers to state of deployment + ### Group.State + State is an enum which refers to state of group | Name | Number | Description | | ---- | ------ | ----------- | | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | - | valid | 1 | CertificateValid denotes state for deployment active | - | revoked | 2 | CertificateRevoked denotes state for deployment closed | + | open | 1 | GroupOpen denotes state for group open | + | paused | 2 | GroupOrdered denotes state for group ordered | + | insufficient_funds | 3 | GroupInsufficientFunds denotes state for group insufficient_funds | + | closed | 4 | GroupClosed denotes state for group closed | - - - - ### Msg - Msg defines the provider Msg service - - | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | - | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateCertificate` | [MsgCreateCertificate](#akash.cert.v1beta2.MsgCreateCertificate) | [MsgCreateCertificateResponse](#akash.cert.v1beta2.MsgCreateCertificateResponse) | CreateCertificate defines a method to create new certificate given proper inputs. 
| | - | `RevokeCertificate` | [MsgRevokeCertificate](#akash.cert.v1beta2.MsgRevokeCertificate) | [MsgRevokeCertificateResponse](#akash.cert.v1beta2.MsgRevokeCertificateResponse) | RevokeCertificate defines a method to revoke the certificate | | - - +

Top

- ## akash/cert/v1beta2/genesis.proto + ## akash/deployment/v1beta1/deployment.proto - + - ### GenesisCertificate - GenesisCertificate defines certificate entry at genesis + ### Deployment + Deployment stores deploymentID, state and version details | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `owner` | [string](#string) | | | - | `certificate` | [Certificate](#akash.cert.v1beta2.Certificate) | | | + | `deployment_id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | + | `state` | [Deployment.State](#akash.deployment.v1beta1.Deployment.State) | | | + | `version` | [bytes](#bytes) | | | + | `created_at` | [int64](#int64) | | | - + - ### GenesisState - GenesisState defines the basic genesis state used by cert module + ### DeploymentFilters + DeploymentFilters defines filters used to filter deployments | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `certificates` | [GenesisCertificate](#akash.cert.v1beta2.GenesisCertificate) | repeated | | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `state` | [string](#string) | | | - + + - + ### DeploymentID + DeploymentID stores owner and sequence number - + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + + - + + + + ### MsgCloseDeployment + MsgCloseDeployment defines an SDK message for closing deployment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | + - -

Top

- ## akash/escrow/v1beta3/types.proto - + - ### Account - Account stores state for an escrow account + ### MsgCloseDeploymentResponse + MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. + + + + + + + + + ### MsgCreateDeployment + MsgCreateDeployment defines an SDK message for creating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [AccountID](#akash.escrow.v1beta3.AccountID) | | unique identifier for this escrow account | - | `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account | - | `state` | [Account.State](#akash.escrow.v1beta3.Account.State) | | current state of this escrow account | - | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | unspent coins received from the owner's wallet | - | `transferred` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | total coins spent by this account | - | `settled_at` | [int64](#int64) | | block height at which this account was last settled | - | `depositor` | [string](#string) | | bech32 encoded account address of the depositor. If depositor is same as the owner, then any incoming coins are added to the Balance. If depositor isn't same as the owner, then any incoming coins are added to the Funds. | - | `funds` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | Funds are unspent coins received from the (non-Owner) Depositor's wallet. If there are any funds, they should be spent before spending the Balance. | + | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | + | `groups` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | repeated | | + | `version` | [bytes](#bytes) | | | + | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - + - ### AccountID - AccountID is the account identifier + ### MsgCreateDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
+ + + + + + + + + ### MsgDepositDeployment + MsgDepositDeployment deposits more funds into the deposit account | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | + | `amount` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - + - ### FractionalPayment - Payment stores state for a payment + ### MsgDepositDeploymentResponse + MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. + + + + + + + + + ### MsgUpdateDeployment + MsgUpdateDeployment defines an SDK message for updating deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `account_id` | [AccountID](#akash.escrow.v1beta3.AccountID) | | | - | `payment_id` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [FractionalPayment.State](#akash.escrow.v1beta3.FractionalPayment.State) | | | - | `rate` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - | `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | + | `groups` | [GroupSpec](#akash.deployment.v1beta1.GroupSpec) | repeated | | + | `version` | [bytes](#bytes) | | | - - - + - ### Account.State - State stores state for an escrow account + ### MsgUpdateDeploymentResponse + MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. 
- | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | AccountStateInvalid is an invalid state | - | open | 1 | AccountOpen is the state when an account is open | - | closed | 2 | AccountClosed is the state when an account is closed | - | overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn | - - ### FractionalPayment.State - Payment State + + + + + + ### Deployment.State + State is an enum which refers to state of deployment | Name | Number | Description | | ---- | ------ | ----------- | - | invalid | 0 | PaymentStateInvalid is the state when the payment is invalid | - | open | 1 | PaymentStateOpen is the state when the payment is open | - | closed | 2 | PaymentStateClosed is the state when the payment is closed | - | overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | active | 1 | DeploymentActive denotes state for deployment active | + | closed | 2 | DeploymentClosed denotes state for deployment closed | + + + + ### Msg + Msg defines the deployment Msg service. + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateDeployment` | [MsgCreateDeployment](#akash.deployment.v1beta1.MsgCreateDeployment) | [MsgCreateDeploymentResponse](#akash.deployment.v1beta1.MsgCreateDeploymentResponse) | CreateDeployment defines a method to create new deployment given proper inputs. 
| | + | `DepositDeployment` | [MsgDepositDeployment](#akash.deployment.v1beta1.MsgDepositDeployment) | [MsgDepositDeploymentResponse](#akash.deployment.v1beta1.MsgDepositDeploymentResponse) | DepositDeployment deposits more funds into the deployment account | | + | `UpdateDeployment` | [MsgUpdateDeployment](#akash.deployment.v1beta1.MsgUpdateDeployment) | [MsgUpdateDeploymentResponse](#akash.deployment.v1beta1.MsgUpdateDeploymentResponse) | UpdateDeployment defines a method to update a deployment given proper inputs. | | + | `CloseDeployment` | [MsgCloseDeployment](#akash.deployment.v1beta1.MsgCloseDeployment) | [MsgCloseDeploymentResponse](#akash.deployment.v1beta1.MsgCloseDeploymentResponse) | CloseDeployment defines a method to close a deployment given proper inputs. | | + | `CloseGroup` | [MsgCloseGroup](#akash.deployment.v1beta1.MsgCloseGroup) | [MsgCloseGroupResponse](#akash.deployment.v1beta1.MsgCloseGroupResponse) | CloseGroup defines a method to close a group of a deployment given proper inputs. | | + | `PauseGroup` | [MsgPauseGroup](#akash.deployment.v1beta1.MsgPauseGroup) | [MsgPauseGroupResponse](#akash.deployment.v1beta1.MsgPauseGroupResponse) | PauseGroup defines a method to pause a group of a deployment given proper inputs. | | + | `StartGroup` | [MsgStartGroup](#akash.deployment.v1beta1.MsgStartGroup) | [MsgStartGroupResponse](#akash.deployment.v1beta1.MsgStartGroupResponse) | StartGroup defines a method to start a group of a deployment given proper inputs. | | + - +

Top

- ## akash/escrow/v1beta3/query.proto + ## akash/deployment/v1beta1/query.proto - + - ### QueryAccountsRequest - QueryAccountRequest is request type for the Query/Account RPC method + ### QueryDeploymentRequest + QueryDeploymentRequest is request type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [string](#string) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `id` | [DeploymentID](#akash.deployment.v1beta1.DeploymentID) | | | - + - ### QueryAccountsResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### QueryDeploymentResponse + QueryDeploymentResponse is response type for the Query/Deployment RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `accounts` | [Account](#akash.escrow.v1beta3.Account) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `deployment` | [Deployment](#akash.deployment.v1beta1.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta1.Group) | repeated | | + | `escrow_account` | [akash.escrow.v1beta1.Account](#akash.escrow.v1beta1.Account) | | | - + - ### QueryPaymentsRequest - QueryPaymentRequest is request type for the Query/Payment RPC method + ### QueryDeploymentsRequest + QueryDeploymentsRequest is request type for the Query/Deployments RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | - | `id` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [string](#string) | | | + | `filters` | [DeploymentFilters](#akash.deployment.v1beta1.DeploymentFilters) | | | | `pagination` | 
[cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -5043,21 +5549,51 @@ the granter's account for a deployment. - + - ### QueryPaymentsResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### QueryDeploymentsResponse + QueryDeploymentsResponse is response type for the Query/Deployments RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `payments` | [FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | repeated | | + | `deployments` | [QueryDeploymentResponse](#akash.deployment.v1beta1.QueryDeploymentResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + ### QueryGroupRequest + QueryGroupRequest is request type for the Query/Group RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [GroupID](#akash.deployment.v1beta1.GroupID) | | | + + + + + + + + + ### QueryGroupResponse + QueryGroupResponse is response type for the Query/Group RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `group` | [Group](#akash.deployment.v1beta1.Group) | | | + + + + + @@ -5065,37 +5601,38 @@ the granter's account for a deployment. 
- + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta3.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta3.QueryAccountsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Accounts queries all accounts | GET|/akash/escrow/v1beta3/types/accounts/list| - | `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta3.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta3.QueryPaymentsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Payments queries all payments | GET|/akash/escrow/v1beta3/types/payments/list| + | `Deployments` | [QueryDeploymentsRequest](#akash.deployment.v1beta1.QueryDeploymentsRequest) | [QueryDeploymentsResponse](#akash.deployment.v1beta1.QueryDeploymentsResponse) | Deployments queries deployments | GET|/akash/deployment/v1beta1/deployments/list| + | `Deployment` | [QueryDeploymentRequest](#akash.deployment.v1beta1.QueryDeploymentRequest) | [QueryDeploymentResponse](#akash.deployment.v1beta1.QueryDeploymentResponse) | Deployment queries deployment details | GET|/akash/deployment/v1beta1/deployments/info| + | `Group` | [QueryGroupRequest](#akash.deployment.v1beta1.QueryGroupRequest) | [QueryGroupResponse](#akash.deployment.v1beta1.QueryGroupResponse) | Group queries group details | GET|/akash/deployment/v1beta1/groups/info| - +

Top

- ## akash/escrow/v1beta3/genesis.proto + ## akash/deployment/v1beta1/authz.proto - + - ### GenesisState - GenesisState defines the basic genesis state used by escrow module + ### DepositDeploymentAuthorization + DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +the granter's account for a deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `accounts` | [Account](#akash.escrow.v1beta3.Account) | repeated | | - | `payments` | [FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | repeated | | + | `spend_limit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | SpendLimit is the amount the grantee is authorized to spend from the granter's account for the purpose of deployment. | @@ -5111,66 +5648,70 @@ the granter's account for a deployment. - +

Top

- ## akash/escrow/v1beta2/types.proto + ## akash/deployment/v1beta1/genesis.proto - + - ### Account - Account stores state for an escrow account + ### GenesisDeployment + GenesisDeployment defines the basic genesis state used by deployment module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [AccountID](#akash.escrow.v1beta2.AccountID) | | unique identifier for this escrow account | - | `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account | - | `state` | [Account.State](#akash.escrow.v1beta2.Account.State) | | current state of this escrow account | - | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | unspent coins received from the owner's wallet | - | `transferred` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | total coins spent by this account | - | `settled_at` | [int64](#int64) | | block height at which this account was last settled | - | `depositor` | [string](#string) | | bech32 encoded account address of the depositor. If depositor is same as the owner, then any incoming coins are added to the Balance. If depositor isn't same as the owner, then any incoming coins are added to the Funds. | - | `funds` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | Funds are unspent coins received from the (non-Owner) Depositor's wallet. If there are any funds, they should be spent before spending the Balance. 
| + | `deployment` | [Deployment](#akash.deployment.v1beta1.Deployment) | | | + | `groups` | [Group](#akash.deployment.v1beta1.Group) | repeated | | - + - ### AccountID - AccountID is the account identifier + ### GenesisState + GenesisState stores slice of genesis deployment instance | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | + | `deployments` | [GenesisDeployment](#akash.deployment.v1beta1.GenesisDeployment) | repeated | | + | `params` | [Params](#akash.deployment.v1beta1.Params) | | | + + + + + + + + + + + + + + +

Top

+ ## akash/deployment/v1beta1/params.proto - + - ### FractionalPayment - Payment stores state for a payment + ### Params + Params defines the parameters for the x/deployment package | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `account_id` | [AccountID](#akash.escrow.v1beta2.AccountID) | | | - | `payment_id` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [FractionalPayment.State](#akash.escrow.v1beta2.FractionalPayment.State) | | | - | `rate` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | - | `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `deployment_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | @@ -5178,34 +5719,37 @@ the granter's account for a deployment. - - + - ### Account.State - State stores state for an escrow account + + + - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | AccountStateInvalid is an invalid state | - | open | 1 | AccountOpen is the state when an account is open | - | closed | 2 | AccountClosed is the state when an account is closed | - | overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn | + + +

Top

+ ## akash/staking/v1beta3/genesis.proto - - ### FractionalPayment.State - Payment State + + + + ### GenesisState + GenesisState stores slice of genesis deployment instance + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `params` | [Params](#akash.staking.v1beta3.Params) | | | + + - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | PaymentStateInvalid is the state when the payment is invalid | - | open | 1 | PaymentStateOpen is the state when the payment is open | - | closed | 2 | PaymentStateClosed is the state when the payment is closed | - | overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn | + + @@ -5214,61 +5758,69 @@ the granter's account for a deployment. - +

Top

- ## akash/escrow/v1beta2/query.proto + ## akash/staking/v1beta3/params.proto - + - ### QueryAccountsRequest - QueryAccountRequest is request type for the Query/Account RPC method + ### Params + Params extends the parameters for the x/staking module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [string](#string) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `min_commission_rate` | [string](#string) | | min_commission_rate is the chain-wide minimum commission rate that a validator can charge their delegators | + + + + + + + + - + + +

Top

- ### QueryAccountsResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ## akash/cert/v1beta3/query.proto + + + + + + ### CertificateResponse + CertificateResponse contains a single X509 certificate and its serial number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `accounts` | [Account](#akash.escrow.v1beta2.Account) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `certificate` | [Certificate](#akash.cert.v1beta3.Certificate) | | | + | `serial` | [string](#string) | | | - + - ### QueryPaymentsRequest - QueryPaymentRequest is request type for the Query/Payment RPC method + ### QueryCertificatesRequest + QueryCertificatesRequest is request type for the Query/Certificates RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | - | `id` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [string](#string) | | | + | `filter` | [CertificateFilter](#akash.cert.v1beta3.CertificateFilter) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -5276,15 +5828,15 @@ the granter's account for a deployment. 
- + - ### QueryPaymentsResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### QueryCertificatesResponse + QueryCertificatesResponse is response type for the Query/Certificates RPC method | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `payments` | [FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | repeated | | + | `certificates` | [CertificateResponse](#akash.cert.v1beta3.CertificateResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -5298,208 +5850,240 @@ the granter's account for a deployment. - + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta2.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta2.QueryAccountsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Accounts queries all accounts | GET|/akash/escrow/v1beta2/types/accounts/list| - | `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta2.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta2.QueryPaymentsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Payments queries all payments | GET|/akash/escrow/v1beta2/types/payments/list| + | `Certificates` | [QueryCertificatesRequest](#akash.cert.v1beta3.QueryCertificatesRequest) | [QueryCertificatesResponse](#akash.cert.v1beta3.QueryCertificatesResponse) | Certificates queries certificates | GET|/akash/cert/v1beta3/certificates/list| - +

Top

- ## akash/escrow/v1beta2/genesis.proto + ## akash/cert/v1beta3/cert.proto - + - ### GenesisState - GenesisState defines the basic genesis state used by escrow module + ### Certificate + Certificate stores state, certificate and it's public key | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `accounts` | [Account](#akash.escrow.v1beta2.Account) | repeated | | - | `payments` | [FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | repeated | | + | `state` | [Certificate.State](#akash.cert.v1beta3.Certificate.State) | | | + | `cert` | [bytes](#bytes) | | | + | `pubkey` | [bytes](#bytes) | | | - - - - - + + - + ### CertificateFilter + CertificateFilter defines filters used to filter certificates + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `serial` | [string](#string) | | | + | `state` | [string](#string) | | | + - -

Top

- ## akash/escrow/v1beta1/types.proto - + - ### Account - Account stores state for an escrow account + ### CertificateID + CertificateID stores owner and sequence number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [AccountID](#akash.escrow.v1beta1.AccountID) | | unique identifier for this escrow account | - | `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account | - | `state` | [Account.State](#akash.escrow.v1beta1.Account.State) | | current state of this escrow account | - | `balance` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | unspent coins received from the owner's wallet | - | `transferred` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | total coins spent by this account | - | `settled_at` | [int64](#int64) | | block height at which this account was last settled | + | `owner` | [string](#string) | | | + | `serial` | [string](#string) | | | - + - ### AccountID - AccountID is the account identifier + ### MsgCreateCertificate + MsgCreateCertificate defines an SDK message for creating certificate | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `cert` | [bytes](#bytes) | | | + | `pubkey` | [bytes](#bytes) | | | - + - ### Payment - Payment stores state for a payment + ### MsgCreateCertificateResponse + MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. 
+ + + + + + + + + ### MsgRevokeCertificate + MsgRevokeCertificate defines an SDK message for revoking certificate | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `account_id` | [AccountID](#akash.escrow.v1beta1.AccountID) | | | - | `payment_id` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [Payment.State](#akash.escrow.v1beta1.Payment.State) | | | - | `rate` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `balance` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `id` | [CertificateID](#akash.cert.v1beta3.CertificateID) | | | - + + + + ### MsgRevokeCertificateResponse + MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. - - ### Account.State - State stores state for an escrow account - - | Name | Number | Description | - | ---- | ------ | ----------- | - | invalid | 0 | AccountStateInvalid is an invalid state | - | open | 1 | AccountOpen is the state when an account is open | - | closed | 2 | AccountClosed is the state when an account is closed | - | overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn | + + - + - ### Payment.State - Payment State + ### Certificate.State + State is an enum which refers to state of deployment | Name | Number | Description | | ---- | ------ | ----------- | - | invalid | 0 | PaymentStateInvalid is the state when the payment is invalid | - | open | 1 | PaymentStateOpen is the state when the payment is open | - | closed | 2 | PaymentStateClosed is the state when the payment is closed | - | overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn | + | invalid | 0 | Prefix should start with 0 in enum. 
So declaring dummy state | + | valid | 1 | CertificateValid denotes state for deployment active | + | revoked | 2 | CertificateRevoked denotes state for deployment closed | + + + + ### Msg + Msg defines the provider Msg service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateCertificate` | [MsgCreateCertificate](#akash.cert.v1beta3.MsgCreateCertificate) | [MsgCreateCertificateResponse](#akash.cert.v1beta3.MsgCreateCertificateResponse) | CreateCertificate defines a method to create new certificate given proper inputs. | | + | `RevokeCertificate` | [MsgRevokeCertificate](#akash.cert.v1beta3.MsgRevokeCertificate) | [MsgRevokeCertificateResponse](#akash.cert.v1beta3.MsgRevokeCertificateResponse) | RevokeCertificate defines a method to revoke the certificate | | + - +

Top

- ## akash/escrow/v1beta1/query.proto + ## akash/cert/v1beta3/genesis.proto - + - ### QueryAccountsRequest - QueryAccountRequest is request type for the Query/Account RPC method + ### GenesisCertificate + GenesisCertificate defines certificate entry at genesis | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | | `owner` | [string](#string) | | | - | `state` | [string](#string) | | | - | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + | `certificate` | [Certificate](#akash.cert.v1beta3.Certificate) | | | - + - ### QueryAccountsResponse - QueryProvidersResponse is response type for the Query/Providers RPC method + ### GenesisState + GenesisState defines the basic genesis state used by cert module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `accounts` | [Account](#akash.escrow.v1beta1.Account) | repeated | | - | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + | `certificates` | [GenesisCertificate](#akash.cert.v1beta3.GenesisCertificate) | repeated | | + + + + + + + + + + + + + + +

Top

+ ## akash/cert/v1beta2/query.proto - + - ### QueryPaymentsRequest - QueryPaymentRequest is request type for the Query/Payment RPC method + ### CertificateResponse + CertificateResponse contains a single X509 certificate and its serial number | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `scope` | [string](#string) | | | - | `xid` | [string](#string) | | | - | `id` | [string](#string) | | | - | `owner` | [string](#string) | | | - | `state` | [string](#string) | | | + | `certificate` | [Certificate](#akash.cert.v1beta2.Certificate) | | | + | `serial` | [string](#string) | | | + + + + + + + + + ### QueryCertificatesRequest + QueryCertificatesRequest is request type for the Query/Certificates RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `filter` | [CertificateFilter](#akash.cert.v1beta2.CertificateFilter) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -5507,15 +6091,15 @@ the granter's account for a deployment. 
- + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta1.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta1.QueryAccountsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Accounts queries all accounts | GET|/akash/escrow/v1beta1/types/accounts/list| - | `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta1.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta1.QueryPaymentsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Payments queries all payments | GET|/akash/escrow/v1beta1/types/payments/list| + | `Certificates` | [QueryCertificatesRequest](#akash.cert.v1beta2.QueryCertificatesRequest) | [QueryCertificatesResponse](#akash.cert.v1beta2.QueryCertificatesResponse) | Certificates queries certificates | GET|/akash/cert/v1beta3/certificates/list| + + + + + + +

Top

+ + ## akash/cert/v1beta2/cert.proto + + + + + + ### Certificate + Certificate stores state, certificate and it's public key + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `state` | [Certificate.State](#akash.cert.v1beta2.Certificate.State) | | | + | `cert` | [bytes](#bytes) | | | + | `pubkey` | [bytes](#bytes) | | | + + + + + + + + + ### CertificateFilter + CertificateFilter defines filters used to filter certificates + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `serial` | [string](#string) | | | + | `state` | [string](#string) | | | + + + + + + + + + ### CertificateID + CertificateID stores owner and sequence number + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `serial` | [string](#string) | | | + + + + + + + + + ### MsgCreateCertificate + MsgCreateCertificate defines an SDK message for creating certificate + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `cert` | [bytes](#bytes) | | | + | `pubkey` | [bytes](#bytes) | | | + + + + + + + + + ### MsgCreateCertificateResponse + MsgCreateCertificateResponse defines the Msg/CreateCertificate response type. + + + + + + + + + ### MsgRevokeCertificate + MsgRevokeCertificate defines an SDK message for revoking certificate + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [CertificateID](#akash.cert.v1beta2.CertificateID) | | | + + + + + + + + + ### MsgRevokeCertificateResponse + MsgRevokeCertificateResponse defines the Msg/RevokeCertificate response type. + + + + + + + + + + + ### Certificate.State + State is an enum which refers to state of deployment + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. 
So declaring dummy state | + | valid | 1 | CertificateValid denotes state for deployment active | + | revoked | 2 | CertificateRevoked denotes state for deployment closed | + + + + + + + + + + ### Msg + Msg defines the provider Msg service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateCertificate` | [MsgCreateCertificate](#akash.cert.v1beta2.MsgCreateCertificate) | [MsgCreateCertificateResponse](#akash.cert.v1beta2.MsgCreateCertificateResponse) | CreateCertificate defines a method to create new certificate given proper inputs. | | + | `RevokeCertificate` | [MsgRevokeCertificate](#akash.cert.v1beta2.MsgRevokeCertificate) | [MsgRevokeCertificateResponse](#akash.cert.v1beta2.MsgRevokeCertificateResponse) | RevokeCertificate defines a method to revoke the certificate | | + + + + + + +

Top

+ + ## akash/cert/v1beta2/genesis.proto + + + + + + ### GenesisCertificate + GenesisCertificate defines certificate entry at genesis + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `certificate` | [Certificate](#akash.cert.v1beta2.Certificate) | | | + + + + + + + + + ### GenesisState + GenesisState defines the basic genesis state used by cert module + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `certificates` | [GenesisCertificate](#akash.cert.v1beta2.GenesisCertificate) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/escrow/v1beta3/types.proto + + + + + + ### Account + Account stores state for an escrow account + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [AccountID](#akash.escrow.v1beta3.AccountID) | | unique identifier for this escrow account | + | `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account | + | `state` | [Account.State](#akash.escrow.v1beta3.Account.State) | | current state of this escrow account | + | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | unspent coins received from the owner's wallet | + | `transferred` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | total coins spent by this account | + | `settled_at` | [int64](#int64) | | block height at which this account was last settled | + | `depositor` | [string](#string) | | bech32 encoded account address of the depositor. If depositor is same as the owner, then any incoming coins are added to the Balance. If depositor isn't same as the owner, then any incoming coins are added to the Funds. | + | `funds` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | Funds are unspent coins received from the (non-Owner) Depositor's wallet. If there are any funds, they should be spent before spending the Balance. 
| + + + + + + + + + ### AccountID + AccountID is the account identifier + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + + + + + + + + + ### FractionalPayment + Payment stores state for a payment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `account_id` | [AccountID](#akash.escrow.v1beta3.AccountID) | | | + | `payment_id` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [FractionalPayment.State](#akash.escrow.v1beta3.FractionalPayment.State) | | | + | `rate` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + + + + + + + + + + + ### Account.State + State stores state for an escrow account + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | AccountStateInvalid is an invalid state | + | open | 1 | AccountOpen is the state when an account is open | + | closed | 2 | AccountClosed is the state when an account is closed | + | overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn | + + + + + + ### FractionalPayment.State + Payment State + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | PaymentStateInvalid is the state when the payment is invalid | + | open | 1 | PaymentStateOpen is the state when the payment is open | + | closed | 2 | PaymentStateClosed is the state when the payment is closed | + | overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn | + + + + + + + + + + + +

Top

+ + ## akash/escrow/v1beta3/query.proto + + + + + + ### QueryAccountsRequest + QueryAccountsRequest is request type for the Query/Accounts RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryAccountsResponse + QueryAccountsResponse is response type for the Query/Accounts RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `accounts` | [Account](#akash.escrow.v1beta3.Account) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + ### QueryPaymentsRequest + QueryPaymentsRequest is request type for the Query/Payments RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + | `id` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryPaymentsResponse + QueryPaymentsResponse is response type for the Query/Payments RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `payments` | [FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + + + + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | 
------------| ------- | -------- | + | `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta3.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta3.QueryAccountsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Accounts queries all accounts | GET|/akash/escrow/v1beta3/types/accounts/list| + | `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta3.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta3.QueryPaymentsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Payments queries all payments | GET|/akash/escrow/v1beta3/types/payments/list| + + + + + + +

Top

+ + ## akash/escrow/v1beta3/genesis.proto + + + + + + ### GenesisState + GenesisState defines the basic genesis state used by escrow module + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `accounts` | [Account](#akash.escrow.v1beta3.Account) | repeated | | + | `payments` | [FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/escrow/v1beta2/types.proto + + + + + + ### Account + Account stores state for an escrow account + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [AccountID](#akash.escrow.v1beta2.AccountID) | | unique identifier for this escrow account | + | `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account | + | `state` | [Account.State](#akash.escrow.v1beta2.Account.State) | | current state of this escrow account | + | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | unspent coins received from the owner's wallet | + | `transferred` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | total coins spent by this account | + | `settled_at` | [int64](#int64) | | block height at which this account was last settled | + | `depositor` | [string](#string) | | bech32 encoded account address of the depositor. If depositor is same as the owner, then any incoming coins are added to the Balance. If depositor isn't same as the owner, then any incoming coins are added to the Funds. | + | `funds` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | Funds are unspent coins received from the (non-Owner) Depositor's wallet. If there are any funds, they should be spent before spending the Balance. 
| + + + + + + + + + ### AccountID + AccountID is the account identifier + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + + + + + + + + + ### FractionalPayment + Payment stores state for a payment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `account_id` | [AccountID](#akash.escrow.v1beta2.AccountID) | | | + | `payment_id` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [FractionalPayment.State](#akash.escrow.v1beta2.FractionalPayment.State) | | | + | `rate` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `balance` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + + + + + + + + + + + ### Account.State + State stores state for an escrow account + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | AccountStateInvalid is an invalid state | + | open | 1 | AccountOpen is the state when an account is open | + | closed | 2 | AccountClosed is the state when an account is closed | + | overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn | + + + + + + ### FractionalPayment.State + Payment State + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | PaymentStateInvalid is the state when the payment is invalid | + | open | 1 | PaymentStateOpen is the state when the payment is open | + | closed | 2 | PaymentStateClosed is the state when the payment is closed | + | overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn | + + + + + + + + + + + +

Top

+ + ## akash/escrow/v1beta2/query.proto + + + + + + ### QueryAccountsRequest + QueryAccountsRequest is request type for the Query/Accounts RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryAccountsResponse + QueryAccountsResponse is response type for the Query/Accounts RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `accounts` | [Account](#akash.escrow.v1beta2.Account) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + ### QueryPaymentsRequest + QueryPaymentsRequest is request type for the Query/Payments RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + | `id` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryPaymentsResponse + QueryPaymentsResponse is response type for the Query/Payments RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `payments` | [FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + + + + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | 
------------| ------- | -------- | + | `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta2.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta2.QueryAccountsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Accounts queries all accounts | GET|/akash/escrow/v1beta2/types/accounts/list| + | `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta2.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta2.QueryPaymentsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Payments queries all payments | GET|/akash/escrow/v1beta2/types/payments/list| + + + + + + +

Top

+ + ## akash/escrow/v1beta2/genesis.proto + + + + + + ### GenesisState + GenesisState defines the basic genesis state used by escrow module + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `accounts` | [Account](#akash.escrow.v1beta2.Account) | repeated | | + | `payments` | [FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/escrow/v1beta1/types.proto + + + + + + ### Account + Account stores state for an escrow account + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [AccountID](#akash.escrow.v1beta1.AccountID) | | unique identifier for this escrow account | + | `owner` | [string](#string) | | bech32 encoded account address of the owner of this escrow account | + | `state` | [Account.State](#akash.escrow.v1beta1.Account.State) | | current state of this escrow account | + | `balance` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | unspent coins received from the owner's wallet | + | `transferred` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | total coins spent by this account | + | `settled_at` | [int64](#int64) | | block height at which this account was last settled | + + + + + + + + + ### AccountID + AccountID is the account identifier + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + + + + + + + + + ### Payment + Payment stores state for a payment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `account_id` | [AccountID](#akash.escrow.v1beta1.AccountID) | | | + | `payment_id` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [Payment.State](#akash.escrow.v1beta1.Payment.State) | | | + | `rate` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `balance` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `withdrawn` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + + + + + + + + + + + ### Account.State + State stores state for an escrow account + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | AccountStateInvalid is an invalid state | + | open | 1 | AccountOpen is the state when an account is open | + | closed | 2 | AccountClosed is the 
state when an account is closed | + | overdrawn | 3 | AccountOverdrawn is the state when an account is overdrawn | + + + + + + ### Payment.State + Payment State + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | PaymentStateInvalid is the state when the payment is invalid | + | open | 1 | PaymentStateOpen is the state when the payment is open | + | closed | 2 | PaymentStateClosed is the state when the payment is closed | + | overdrawn | 3 | PaymentStateOverdrawn is the state when the payment is overdrawn | + + + + + + + + + + + +

Top

+ + ## akash/escrow/v1beta1/query.proto + + + + + + ### QueryAccountsRequest + QueryAccountsRequest is request type for the Query/Accounts RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryAccountsResponse + QueryAccountsResponse is response type for the Query/Accounts RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `accounts` | [Account](#akash.escrow.v1beta1.Account) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + ### QueryPaymentsRequest + QueryPaymentsRequest is request type for the Query/Payments RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `scope` | [string](#string) | | | + | `xid` | [string](#string) | | | + | `id` | [string](#string) | | | + | `owner` | [string](#string) | | | + | `state` | [string](#string) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryPaymentsResponse + QueryPaymentsResponse is response type for the Query/Payments RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `payments` | [Payment](#akash.escrow.v1beta1.Payment) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + + + + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | 
-------- | + | `Accounts` | [QueryAccountsRequest](#akash.escrow.v1beta1.QueryAccountsRequest) | [QueryAccountsResponse](#akash.escrow.v1beta1.QueryAccountsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Accounts queries all accounts | GET|/akash/escrow/v1beta1/types/accounts/list| + | `Payments` | [QueryPaymentsRequest](#akash.escrow.v1beta1.QueryPaymentsRequest) | [QueryPaymentsResponse](#akash.escrow.v1beta1.QueryPaymentsResponse) | buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE buf:lint:ignore RPC_RESPONSE_STANDARD_NAME Payments queries all payments | GET|/akash/escrow/v1beta1/types/payments/list| + + + + + + +

Top

+ + ## akash/escrow/v1beta1/genesis.proto + + + + + + ### GenesisState + GenesisState defines the basic genesis state used by escrow module + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `accounts` | [Account](#akash.escrow.v1beta1.Account) | repeated | | + | `payments` | [Payment](#akash.escrow.v1beta1.Payment) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/market/v1beta4/bid.proto + + + + + + ### Bid + Bid stores BidID, state of bid and price + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | | + | `state` | [Bid.State](#akash.market.v1beta4.Bid.State) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `created_at` | [int64](#int64) | | | + | `resources_offer` | [ResourceOffer](#akash.market.v1beta4.ResourceOffer) | repeated | | + + + + + + + + + ### BidFilters + BidFilters defines flags for bid list filter + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | + | `state` | [string](#string) | | | + + + + + + + + + ### BidID + BidID stores owner and all other seq numbers +A successful bid becomes a Lease(ID). + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | + + + + + + + + + ### MsgCloseBid + MsgCloseBid defines an SDK message for closing bid + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | | + + + + + + + + + ### MsgCloseBidResponse + MsgCloseBidResponse defines the Msg/CloseBid response type. 
+ + + + + + + + + ### MsgCreateBid + MsgCreateBid defines an SDK message for creating Bid + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `order` | [OrderID](#akash.market.v1beta4.OrderID) | | | + | `provider` | [string](#string) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `resources_offer` | [ResourceOffer](#akash.market.v1beta4.ResourceOffer) | repeated | | + + + + + + + + + ### MsgCreateBidResponse + MsgCreateBidResponse defines the Msg/CreateBid response type. + + + + + + + + + ### ResourceOffer + ResourceOffer describes resources that provider is offering +for deployment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `resources` | [akash.base.v1beta3.Resources](#akash.base.v1beta3.Resources) | | | + | `count` | [uint32](#uint32) | | | + + + + + + + + + + + ### Bid.State + State is an enum which refers to state of bid + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | open | 1 | BidOpen denotes state for bid open | + | active | 2 | BidMatched denotes state for bid matched | + | lost | 3 | BidLost denotes state for bid lost | + | closed | 4 | BidClosed denotes state for bid closed | + + + + + + + + + + + +

Top

+ + ## akash/market/v1beta4/query.proto + + + + + + ### QueryBidRequest + QueryBidRequest is request type for the Query/Bid RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [BidID](#akash.market.v1beta4.BidID) | | | + + + + + + + + + ### QueryBidResponse + QueryBidResponse is response type for the Query/Bid RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bid` | [Bid](#akash.market.v1beta4.Bid) | | | + | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | + + + + + + + + + ### QueryBidsRequest + QueryBidsRequest is request type for the Query/Bids RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `filters` | [BidFilters](#akash.market.v1beta4.BidFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryBidsResponse + QueryBidsResponse is response type for the Query/Bids RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bids` | [QueryBidResponse](#akash.market.v1beta4.QueryBidResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + ### QueryLeaseRequest + QueryLeaseRequest is request type for the Query/Lease RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | + + + + + + + + + ### QueryLeaseResponse + QueryLeaseResponse is response type for the Query/Lease RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `lease` | [Lease](#akash.market.v1beta4.Lease) | | | + | `escrow_payment` | [akash.escrow.v1beta3.FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | | | + + + + + + + + + ### 
QueryLeasesRequest + QueryLeasesRequest is request type for the Query/Leases RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `filters` | [LeaseFilters](#akash.market.v1beta4.LeaseFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryLeasesResponse + QueryLeasesResponse is response type for the Query/Leases RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `leases` | [QueryLeaseResponse](#akash.market.v1beta4.QueryLeaseResponse) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | + + + + + + + + + ### QueryOrderRequest + QueryOrderRequest is request type for the Query/Order RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `id` | [OrderID](#akash.market.v1beta4.OrderID) | | | + + + + + + + + + ### QueryOrderResponse + QueryOrderResponse is response type for the Query/Order RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `order` | [Order](#akash.market.v1beta4.Order) | | | + + + + + + + + + ### QueryOrdersRequest + QueryOrdersRequest is request type for the Query/Orders RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `filters` | [OrderFilters](#akash.market.v1beta4.OrderFilters) | | | + | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | + + + + + + + + + ### QueryOrdersResponse + QueryOrdersResponse is response type for the Query/Orders RPC method + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `orders` | [Order](#akash.market.v1beta4.Order) | repeated | | + | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | 
| + + + + + + + + + + + + + + + ### Query + Query defines the gRPC querier service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `Orders` | [QueryOrdersRequest](#akash.market.v1beta4.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta4.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta4/orders/list| + | `Order` | [QueryOrderRequest](#akash.market.v1beta4.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta4.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta4/orders/info| + | `Bids` | [QueryBidsRequest](#akash.market.v1beta4.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta4.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta4/bids/list| + | `Bid` | [QueryBidRequest](#akash.market.v1beta4.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta4.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta4/bids/info| + | `Leases` | [QueryLeasesRequest](#akash.market.v1beta4.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta4.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta4/leases/list| + | `Lease` | [QueryLeaseRequest](#akash.market.v1beta4.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta4.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta4/leases/info| + + + + + + +

Top

+ + ## akash/market/v1beta4/service.proto + + + + + + + + + + + + ### Msg + Msg defines the market Msg service + + | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | + | ----------- | ------------ | ------------- | ------------| ------- | -------- | + | `CreateBid` | [MsgCreateBid](#akash.market.v1beta4.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta4.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | + | `CloseBid` | [MsgCloseBid](#akash.market.v1beta4.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta4.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | + | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta4.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta4.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | + | `CreateLease` | [MsgCreateLease](#akash.market.v1beta4.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta4.MsgCreateLeaseResponse) | CreateLease creates a new lease | | + | `CloseLease` | [MsgCloseLease](#akash.market.v1beta4.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta4.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | + + + + + + +

Top

+ + ## akash/market/v1beta4/lease.proto + + + + + + ### Lease + Lease stores LeaseID, state of lease and price + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `lease_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | + | `state` | [Lease.State](#akash.market.v1beta4.Lease.State) | | | + | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | + | `created_at` | [int64](#int64) | | | + | `closed_on` | [int64](#int64) | | | + + + + + + + + + ### LeaseFilters + LeaseFilters defines flags for lease list filter + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | + | `state` | [string](#string) | | | + + + + + + + + + ### LeaseID + LeaseID stores bid details of lease + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `provider` | [string](#string) | | | + + + + + + + + + ### MsgCloseLease + MsgCloseLease defines an SDK message for closing order + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `lease_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | + + + + + + + + + ### MsgCloseLeaseResponse + MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
+ + + + + + + + + ### MsgCreateLease + MsgCreateLease is sent to create a lease + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | | + + + + + + + + + ### MsgCreateLeaseResponse + MsgCreateLeaseResponse is the response from creating a lease + + + + + + + + + ### MsgWithdrawLease + MsgWithdrawLease defines an SDK message for withdrawing accrued funds from the lease payment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `bid_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | + + + + + + + + + ### MsgWithdrawLeaseResponse + MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. + + + + + + + + + + + ### Lease.State + State is an enum which refers to state of lease + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | active | 1 | LeaseActive denotes state for lease active | + | insufficient_funds | 2 | LeaseInsufficientFunds denotes state for lease insufficient_funds | + | closed | 3 | LeaseClosed denotes state for lease closed | + + + + + + + + + + + +

Top

+ + ## akash/market/v1beta4/genesis.proto + + + + + + ### GenesisState + GenesisState defines the basic genesis state used by market module + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `params` | [Params](#akash.market.v1beta4.Params) | | | + | `orders` | [Order](#akash.market.v1beta4.Order) | repeated | | + | `leases` | [Lease](#akash.market.v1beta4.Lease) | repeated | | + | `bids` | [Bid](#akash.market.v1beta4.Bid) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/market/v1beta4/order.proto + + + + + + ### Order + Order stores orderID, state of order and other details + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `order_id` | [OrderID](#akash.market.v1beta4.OrderID) | | | + | `state` | [Order.State](#akash.market.v1beta4.Order.State) | | | + | `spec` | [akash.deployment.v1beta3.GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | | + | `created_at` | [int64](#int64) | | | + + + + + + + + + ### OrderFilters + OrderFilters defines flags for order list filter + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + | `state` | [string](#string) | | | + + + + + + + + + ### OrderID + OrderID stores owner and all other seq numbers + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `owner` | [string](#string) | | | + | `dseq` | [uint64](#uint64) | | | + | `gseq` | [uint32](#uint32) | | | + | `oseq` | [uint32](#uint32) | | | + + + + + + + + + + + ### Order.State + State is an enum which refers to state of order + + | Name | Number | Description | + | ---- | ------ | ----------- | + | invalid | 0 | Prefix should start with 0 in enum. So declaring dummy state | + | open | 1 | OrderOpen denotes state for order open | + | active | 2 | OrderMatched denotes state for order matched | + | closed | 3 | OrderClosed denotes state for order closed | + + + + + - +

Top

- ## akash/escrow/v1beta1/genesis.proto + ## akash/market/v1beta4/params.proto - + - ### GenesisState - GenesisState defines the basic genesis state used by escrow module + ### Params + Params is the params for the x/market module | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `accounts` | [Account](#akash.escrow.v1beta1.Account) | repeated | | - | `payments` | [Payment](#akash.escrow.v1beta1.Payment) | repeated | | + | `bid_min_deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `order_max_bids` | [uint32](#uint32) | | | @@ -5575,14 +7736,14 @@ the granter's account for a deployment. - +

Top

- ## akash/market/v1beta4/bid.proto + ## akash/market/v1beta3/bid.proto - + ### Bid Bid stores BidID, state of bid and price @@ -5590,18 +7751,17 @@ the granter's account for a deployment. | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | | - | `state` | [Bid.State](#akash.market.v1beta4.Bid.State) | | | + | `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | | + | `state` | [Bid.State](#akash.market.v1beta3.Bid.State) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `created_at` | [int64](#int64) | | | - | `resources_offer` | [ResourceOffer](#akash.market.v1beta4.ResourceOffer) | repeated | | - + ### BidFilters BidFilters defines flags for bid list filter @@ -5621,7 +7781,7 @@ the granter's account for a deployment. - + ### BidID BidID stores owner and all other seq numbers @@ -5641,7 +7801,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCloseBid MsgCloseBid defines an SDK message for closing bid @@ -5649,14 +7809,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | | + | `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | | - + ### MsgCloseBidResponse MsgCloseBidResponse defines the Msg/CloseBid response type. @@ -5666,7 +7826,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCreateBid MsgCreateBid defines an SDK message for creating Bid @@ -5674,18 +7834,17 @@ A successful bid becomes a Lease(ID). 
| Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order` | [OrderID](#akash.market.v1beta4.OrderID) | | | + | `order` | [OrderID](#akash.market.v1beta3.OrderID) | | | | `provider` | [string](#string) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | - | `resources_offer` | [ResourceOffer](#akash.market.v1beta4.ResourceOffer) | repeated | | - + ### MsgCreateBidResponse MsgCreateBidResponse defines the Msg/CreateBid response type. @@ -5694,27 +7853,10 @@ A successful bid becomes a Lease(ID). - - - - ### ResourceOffer - ResourceOffer describes resources that provider is offering -for deployment - - - | Field | Type | Label | Description | - | ----- | ---- | ----- | ----------- | - | `resources` | [akash.base.v1beta3.Resources](#akash.base.v1beta3.Resources) | | | - | `count` | [uint32](#uint32) | | | - - - - - - + ### Bid.State State is an enum which refers to state of bid @@ -5736,14 +7878,14 @@ for deployment - +

Top

- ## akash/market/v1beta4/query.proto + ## akash/market/v1beta3/query.proto - + ### QueryBidRequest QueryBidRequest is request type for the Query/Bid RPC method @@ -5751,14 +7893,14 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [BidID](#akash.market.v1beta4.BidID) | | | + | `id` | [BidID](#akash.market.v1beta3.BidID) | | | - + ### QueryBidResponse QueryBidResponse is response type for the Query/Bid RPC method @@ -5766,7 +7908,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid` | [Bid](#akash.market.v1beta4.Bid) | | | + | `bid` | [Bid](#akash.market.v1beta3.Bid) | | | | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | @@ -5774,7 +7916,7 @@ for deployment - + ### QueryBidsRequest QueryBidsRequest is request type for the Query/Bids RPC method @@ -5782,7 +7924,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [BidFilters](#akash.market.v1beta4.BidFilters) | | | + | `filters` | [BidFilters](#akash.market.v1beta3.BidFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -5790,7 +7932,7 @@ for deployment - + ### QueryBidsResponse QueryBidsResponse is response type for the Query/Bids RPC method @@ -5798,7 +7940,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bids` | [QueryBidResponse](#akash.market.v1beta4.QueryBidResponse) | repeated | | + | `bids` | [QueryBidResponse](#akash.market.v1beta3.QueryBidResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -5806,7 +7948,7 @@ for deployment - + ### QueryLeaseRequest QueryLeaseRequest is request type for the Query/Lease RPC method @@ -5814,14 +7956,14 @@ for deployment | Field | Type | Label | Description | | ----- | 
---- | ----- | ----------- | - | `id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | + | `id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | - + ### QueryLeaseResponse QueryLeaseResponse is response type for the Query/Lease RPC method @@ -5829,7 +7971,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease` | [Lease](#akash.market.v1beta4.Lease) | | | + | `lease` | [Lease](#akash.market.v1beta3.Lease) | | | | `escrow_payment` | [akash.escrow.v1beta3.FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | | | @@ -5837,7 +7979,7 @@ for deployment - + ### QueryLeasesRequest QueryLeasesRequest is request type for the Query/Leases RPC method @@ -5845,7 +7987,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [LeaseFilters](#akash.market.v1beta4.LeaseFilters) | | | + | `filters` | [LeaseFilters](#akash.market.v1beta3.LeaseFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -5853,7 +7995,7 @@ for deployment - + ### QueryLeasesResponse QueryLeasesResponse is response type for the Query/Leases RPC method @@ -5861,7 +8003,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `leases` | [QueryLeaseResponse](#akash.market.v1beta4.QueryLeaseResponse) | repeated | | + | `leases` | [QueryLeaseResponse](#akash.market.v1beta3.QueryLeaseResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -5869,7 +8011,7 @@ for deployment - + ### QueryOrderRequest QueryOrderRequest is request type for the Query/Order RPC method @@ -5877,14 +8019,14 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [OrderID](#akash.market.v1beta4.OrderID) | | | + | `id` | [OrderID](#akash.market.v1beta3.OrderID) | | | - + ### QueryOrderResponse 
QueryOrderResponse is response type for the Query/Order RPC method @@ -5892,14 +8034,14 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order` | [Order](#akash.market.v1beta4.Order) | | | + | `order` | [Order](#akash.market.v1beta3.Order) | | | - + ### QueryOrdersRequest QueryOrdersRequest is request type for the Query/Orders RPC method @@ -5907,7 +8049,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [OrderFilters](#akash.market.v1beta4.OrderFilters) | | | + | `filters` | [OrderFilters](#akash.market.v1beta3.OrderFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -5915,7 +8057,7 @@ for deployment - + ### QueryOrdersResponse QueryOrdersResponse is response type for the Query/Orders RPC method @@ -5923,7 +8065,7 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `orders` | [Order](#akash.market.v1beta4.Order) | repeated | | + | `orders` | [Order](#akash.market.v1beta3.Order) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -5937,28 +8079,28 @@ for deployment - + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Orders` | [QueryOrdersRequest](#akash.market.v1beta4.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta4.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta4/orders/list| - | `Order` | [QueryOrderRequest](#akash.market.v1beta4.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta4.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta4/orders/info| - | `Bids` | 
[QueryBidsRequest](#akash.market.v1beta4.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta4.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta4/bids/list| - | `Bid` | [QueryBidRequest](#akash.market.v1beta4.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta4.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta4/bids/info| - | `Leases` | [QueryLeasesRequest](#akash.market.v1beta4.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta4.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta4/leases/list| - | `Lease` | [QueryLeaseRequest](#akash.market.v1beta4.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta4.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta4/leases/info| + | `Orders` | [QueryOrdersRequest](#akash.market.v1beta3.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta3.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta3/orders/list| + | `Order` | [QueryOrderRequest](#akash.market.v1beta3.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta3.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta3/orders/info| + | `Bids` | [QueryBidsRequest](#akash.market.v1beta3.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta3.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta3/bids/list| + | `Bid` | [QueryBidRequest](#akash.market.v1beta3.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta3.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta3/bids/info| + | `Leases` | [QueryLeasesRequest](#akash.market.v1beta3.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta3.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta3/leases/list| + | `Lease` | [QueryLeaseRequest](#akash.market.v1beta3.QueryLeaseRequest) | 
[QueryLeaseResponse](#akash.market.v1beta3.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta3/leases/info| - +

Top

- ## akash/market/v1beta4/service.proto + ## akash/market/v1beta3/service.proto @@ -5968,31 +8110,31 @@ for deployment - + ### Msg Msg defines the market Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateBid` | [MsgCreateBid](#akash.market.v1beta4.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta4.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | - | `CloseBid` | [MsgCloseBid](#akash.market.v1beta4.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta4.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | - | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta4.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta4.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | - | `CreateLease` | [MsgCreateLease](#akash.market.v1beta4.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta4.MsgCreateLeaseResponse) | CreateLease creates a new lease | | - | `CloseLease` | [MsgCloseLease](#akash.market.v1beta4.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta4.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | + | `CreateBid` | [MsgCreateBid](#akash.market.v1beta3.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta3.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | + | `CloseBid` | [MsgCloseBid](#akash.market.v1beta3.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta3.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. 
| | + | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta3.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta3.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | + | `CreateLease` | [MsgCreateLease](#akash.market.v1beta3.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta3.MsgCreateLeaseResponse) | CreateLease creates a new lease | | + | `CloseLease` | [MsgCloseLease](#akash.market.v1beta3.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta3.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | - +

Top

- ## akash/market/v1beta4/lease.proto + ## akash/market/v1beta3/lease.proto - + ### Lease Lease stores LeaseID, state of lease and price @@ -6000,8 +8142,8 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | - | `state` | [Lease.State](#akash.market.v1beta4.Lease.State) | | | + | `lease_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | + | `state` | [Lease.State](#akash.market.v1beta3.Lease.State) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `created_at` | [int64](#int64) | | | | `closed_on` | [int64](#int64) | | | @@ -6011,7 +8153,7 @@ for deployment - + ### LeaseFilters LeaseFilters defines flags for lease list filter @@ -6031,7 +8173,7 @@ for deployment - + ### LeaseID LeaseID stores bid details of lease @@ -6050,7 +8192,7 @@ for deployment - + ### MsgCloseLease MsgCloseLease defines an SDK message for closing order @@ -6058,14 +8200,14 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | + | `lease_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | - + ### MsgCloseLeaseResponse MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
@@ -6075,7 +8217,7 @@ for deployment - + ### MsgCreateLease MsgCreateLease is sent to create a lease @@ -6083,14 +8225,14 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta4.BidID) | | | + | `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | | - + ### MsgCreateLeaseResponse MsgCreateLeaseResponse is the response from creating a lease @@ -6100,7 +8242,7 @@ for deployment - + ### MsgWithdrawLease MsgWithdrawLease defines an SDK message for closing bid @@ -6108,14 +8250,14 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [LeaseID](#akash.market.v1beta4.LeaseID) | | | + | `bid_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | - + ### MsgWithdrawLeaseResponse MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. @@ -6127,7 +8269,7 @@ for deployment - + ### Lease.State State is an enum which refers to state of lease @@ -6148,14 +8290,14 @@ for deployment - +

Top

- ## akash/market/v1beta4/genesis.proto + ## akash/market/v1beta3/genesis.proto - + ### GenesisState GenesisState defines the basic genesis state used by market module @@ -6163,10 +8305,10 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.market.v1beta4.Params) | | | - | `orders` | [Order](#akash.market.v1beta4.Order) | repeated | | - | `leases` | [Lease](#akash.market.v1beta4.Lease) | repeated | | - | `bids` | [Bid](#akash.market.v1beta4.Bid) | repeated | | + | `params` | [Params](#akash.market.v1beta3.Params) | | | + | `orders` | [Order](#akash.market.v1beta3.Order) | repeated | | + | `leases` | [Lease](#akash.market.v1beta3.Lease) | repeated | | + | `bids` | [Bid](#akash.market.v1beta3.Bid) | repeated | | @@ -6182,14 +8324,14 @@ for deployment - +

Top

- ## akash/market/v1beta4/order.proto + ## akash/market/v1beta3/order.proto - + ### Order Order stores orderID, state of order and other details @@ -6197,8 +8339,8 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order_id` | [OrderID](#akash.market.v1beta4.OrderID) | | | - | `state` | [Order.State](#akash.market.v1beta4.Order.State) | | | + | `order_id` | [OrderID](#akash.market.v1beta3.OrderID) | | | + | `state` | [Order.State](#akash.market.v1beta3.Order.State) | | | | `spec` | [akash.deployment.v1beta3.GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | | | `created_at` | [int64](#int64) | | | @@ -6207,7 +8349,7 @@ for deployment - + ### OrderFilters OrderFilters defines flags for order list filter @@ -6226,7 +8368,7 @@ for deployment - + ### OrderID OrderID stores owner and all other seq numbers @@ -6246,7 +8388,7 @@ for deployment - + ### Order.State State is an enum which refers to state of order @@ -6267,14 +8409,14 @@ for deployment - +

Top

- ## akash/market/v1beta4/params.proto + ## akash/market/v1beta3/params.proto - + ### Params Params is the params for the x/market module @@ -6299,14 +8441,14 @@ for deployment - +

Top

- ## akash/market/v1beta3/bid.proto + ## akash/market/v1beta2/bid.proto - + ### Bid Bid stores BidID, state of bid and price @@ -6314,8 +8456,8 @@ for deployment | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | | - | `state` | [Bid.State](#akash.market.v1beta3.Bid.State) | | | + | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | + | `state` | [Bid.State](#akash.market.v1beta2.Bid.State) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `created_at` | [int64](#int64) | | | @@ -6324,7 +8466,7 @@ for deployment - + ### BidFilters BidFilters defines flags for bid list filter @@ -6344,7 +8486,7 @@ for deployment - + ### BidID BidID stores owner and all other seq numbers @@ -6364,7 +8506,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCloseBid MsgCloseBid defines an SDK message for closing bid @@ -6372,14 +8514,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | | + | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | - + ### MsgCloseBidResponse MsgCloseBidResponse defines the Msg/CloseBid response type. @@ -6389,7 +8531,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCreateBid MsgCreateBid defines an SDK message for creating Bid @@ -6397,7 +8539,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order` | [OrderID](#akash.market.v1beta3.OrderID) | | | + | `order` | [OrderID](#akash.market.v1beta2.OrderID) | | | | `provider` | [string](#string) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | @@ -6407,7 +8549,7 @@ A successful bid becomes a Lease(ID). 
- + ### MsgCreateBidResponse MsgCreateBidResponse defines the Msg/CreateBid response type. @@ -6419,7 +8561,7 @@ A successful bid becomes a Lease(ID). - + ### Bid.State State is an enum which refers to state of bid @@ -6441,14 +8583,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta3/query.proto + ## akash/market/v1beta2/query.proto - + ### QueryBidRequest QueryBidRequest is request type for the Query/Bid RPC method @@ -6456,14 +8598,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [BidID](#akash.market.v1beta3.BidID) | | | + | `id` | [BidID](#akash.market.v1beta2.BidID) | | | - + ### QueryBidResponse QueryBidResponse is response type for the Query/Bid RPC method @@ -6471,15 +8613,15 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid` | [Bid](#akash.market.v1beta3.Bid) | | | - | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | + | `bid` | [Bid](#akash.market.v1beta2.Bid) | | | + | `escrow_account` | [akash.escrow.v1beta2.Account](#akash.escrow.v1beta2.Account) | | | - + ### QueryBidsRequest QueryBidsRequest is request type for the Query/Bids RPC method @@ -6487,7 +8629,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [BidFilters](#akash.market.v1beta3.BidFilters) | | | + | `filters` | [BidFilters](#akash.market.v1beta2.BidFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -6495,7 +8637,7 @@ A successful bid becomes a Lease(ID). - + ### QueryBidsResponse QueryBidsResponse is response type for the Query/Bids RPC method @@ -6503,7 +8645,7 @@ A successful bid becomes a Lease(ID). 
| Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bids` | [QueryBidResponse](#akash.market.v1beta3.QueryBidResponse) | repeated | | + | `bids` | [QueryBidResponse](#akash.market.v1beta2.QueryBidResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -6511,7 +8653,7 @@ A successful bid becomes a Lease(ID). - + ### QueryLeaseRequest QueryLeaseRequest is request type for the Query/Lease RPC method @@ -6519,14 +8661,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | + | `id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | - + ### QueryLeaseResponse QueryLeaseResponse is response type for the Query/Lease RPC method @@ -6534,15 +8676,15 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease` | [Lease](#akash.market.v1beta3.Lease) | | | - | `escrow_payment` | [akash.escrow.v1beta3.FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | | | + | `lease` | [Lease](#akash.market.v1beta2.Lease) | | | + | `escrow_payment` | [akash.escrow.v1beta2.FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | | | - + ### QueryLeasesRequest QueryLeasesRequest is request type for the Query/Leases RPC method @@ -6550,7 +8692,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [LeaseFilters](#akash.market.v1beta3.LeaseFilters) | | | + | `filters` | [LeaseFilters](#akash.market.v1beta2.LeaseFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -6558,7 +8700,7 @@ A successful bid becomes a Lease(ID). 
- + ### QueryLeasesResponse QueryLeasesResponse is response type for the Query/Leases RPC method @@ -6566,7 +8708,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `leases` | [QueryLeaseResponse](#akash.market.v1beta3.QueryLeaseResponse) | repeated | | + | `leases` | [QueryLeaseResponse](#akash.market.v1beta2.QueryLeaseResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -6574,7 +8716,7 @@ A successful bid becomes a Lease(ID). - + ### QueryOrderRequest QueryOrderRequest is request type for the Query/Order RPC method @@ -6582,14 +8724,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [OrderID](#akash.market.v1beta3.OrderID) | | | + | `id` | [OrderID](#akash.market.v1beta2.OrderID) | | | - + ### QueryOrderResponse QueryOrderResponse is response type for the Query/Order RPC method @@ -6597,14 +8739,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order` | [Order](#akash.market.v1beta3.Order) | | | + | `order` | [Order](#akash.market.v1beta2.Order) | | | - + ### QueryOrdersRequest QueryOrdersRequest is request type for the Query/Orders RPC method @@ -6612,7 +8754,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [OrderFilters](#akash.market.v1beta3.OrderFilters) | | | + | `filters` | [OrderFilters](#akash.market.v1beta2.OrderFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -6620,7 +8762,7 @@ A successful bid becomes a Lease(ID). - + ### QueryOrdersResponse QueryOrdersResponse is response type for the Query/Orders RPC method @@ -6628,7 +8770,7 @@ A successful bid becomes a Lease(ID). 
| Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `orders` | [Order](#akash.market.v1beta3.Order) | repeated | | + | `orders` | [Order](#akash.market.v1beta2.Order) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -6642,28 +8784,28 @@ A successful bid becomes a Lease(ID). - + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Orders` | [QueryOrdersRequest](#akash.market.v1beta3.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta3.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta3/orders/list| - | `Order` | [QueryOrderRequest](#akash.market.v1beta3.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta3.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta3/orders/info| - | `Bids` | [QueryBidsRequest](#akash.market.v1beta3.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta3.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta3/bids/list| - | `Bid` | [QueryBidRequest](#akash.market.v1beta3.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta3.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta3/bids/info| - | `Leases` | [QueryLeasesRequest](#akash.market.v1beta3.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta3.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta3/leases/list| - | `Lease` | [QueryLeaseRequest](#akash.market.v1beta3.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta3.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta3/leases/info| + | `Orders` | [QueryOrdersRequest](#akash.market.v1beta2.QueryOrdersRequest) | 
[QueryOrdersResponse](#akash.market.v1beta2.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta2/orders/list| + | `Order` | [QueryOrderRequest](#akash.market.v1beta2.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta2.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta2/orders/info| + | `Bids` | [QueryBidsRequest](#akash.market.v1beta2.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta2.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta2/bids/list| + | `Bid` | [QueryBidRequest](#akash.market.v1beta2.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta2.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta2/bids/info| + | `Leases` | [QueryLeasesRequest](#akash.market.v1beta2.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta2.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta2/leases/list| + | `Lease` | [QueryLeaseRequest](#akash.market.v1beta2.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta2.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta2/leases/info| - +

Top

- ## akash/market/v1beta3/service.proto + ## akash/market/v1beta2/service.proto @@ -6673,31 +8815,31 @@ A successful bid becomes a Lease(ID). - + ### Msg Msg defines the market Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateBid` | [MsgCreateBid](#akash.market.v1beta3.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta3.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | - | `CloseBid` | [MsgCloseBid](#akash.market.v1beta3.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta3.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | - | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta3.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta3.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | - | `CreateLease` | [MsgCreateLease](#akash.market.v1beta3.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta3.MsgCreateLeaseResponse) | CreateLease creates a new lease | | - | `CloseLease` | [MsgCloseLease](#akash.market.v1beta3.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta3.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | + | `CreateBid` | [MsgCreateBid](#akash.market.v1beta2.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta2.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | + | `CloseBid` | [MsgCloseBid](#akash.market.v1beta2.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta2.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. 
| | + | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta2.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta2.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | + | `CreateLease` | [MsgCreateLease](#akash.market.v1beta2.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta2.MsgCreateLeaseResponse) | CreateLease creates a new lease | | + | `CloseLease` | [MsgCloseLease](#akash.market.v1beta2.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta2.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | - +

Top

- ## akash/market/v1beta3/lease.proto + ## akash/market/v1beta2/lease.proto - + ### Lease Lease stores LeaseID, state of lease and price @@ -6705,8 +8847,8 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | - | `state` | [Lease.State](#akash.market.v1beta3.Lease.State) | | | + | `lease_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | + | `state` | [Lease.State](#akash.market.v1beta2.Lease.State) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `created_at` | [int64](#int64) | | | | `closed_on` | [int64](#int64) | | | @@ -6716,7 +8858,7 @@ A successful bid becomes a Lease(ID). - + ### LeaseFilters LeaseFilters defines flags for lease list filter @@ -6736,7 +8878,7 @@ A successful bid becomes a Lease(ID). - + ### LeaseID LeaseID stores bid details of lease @@ -6755,7 +8897,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCloseLease MsgCloseLease defines an SDK message for closing order @@ -6763,14 +8905,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | + | `lease_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | - + ### MsgCloseLeaseResponse MsgCloseLeaseResponse defines the Msg/CloseLease response type. @@ -6780,7 +8922,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCreateLease MsgCreateLease is sent to create a lease @@ -6788,14 +8930,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta3.BidID) | | | + | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | - + ### MsgCreateLeaseResponse MsgCreateLeaseResponse is the response from creating a lease @@ -6805,7 +8947,7 @@ A successful bid becomes a Lease(ID). 
- + ### MsgWithdrawLease MsgWithdrawLease defines an SDK message for closing bid @@ -6813,14 +8955,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [LeaseID](#akash.market.v1beta3.LeaseID) | | | + | `bid_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | - + ### MsgWithdrawLeaseResponse MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. @@ -6832,7 +8974,7 @@ A successful bid becomes a Lease(ID). - + ### Lease.State State is an enum which refers to state of lease @@ -6853,14 +8995,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta3/genesis.proto + ## akash/market/v1beta2/genesis.proto - + ### GenesisState GenesisState defines the basic genesis state used by market module @@ -6868,10 +9010,9 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `params` | [Params](#akash.market.v1beta3.Params) | | | - | `orders` | [Order](#akash.market.v1beta3.Order) | repeated | | - | `leases` | [Lease](#akash.market.v1beta3.Lease) | repeated | | - | `bids` | [Bid](#akash.market.v1beta3.Bid) | repeated | | + | `orders` | [Order](#akash.market.v1beta2.Order) | repeated | | + | `leases` | [Lease](#akash.market.v1beta2.Lease) | repeated | | + | `params` | [Params](#akash.market.v1beta2.Params) | | | @@ -6887,14 +9028,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta3/order.proto + ## akash/market/v1beta2/order.proto - + ### Order Order stores orderID, state of order and other details @@ -6902,9 +9043,9 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order_id` | [OrderID](#akash.market.v1beta3.OrderID) | | | - | `state` | [Order.State](#akash.market.v1beta3.Order.State) | | | - | `spec` | [akash.deployment.v1beta3.GroupSpec](#akash.deployment.v1beta3.GroupSpec) | | | + | `order_id` | [OrderID](#akash.market.v1beta2.OrderID) | | | + | `state` | [Order.State](#akash.market.v1beta2.Order.State) | | | + | `spec` | [akash.deployment.v1beta2.GroupSpec](#akash.deployment.v1beta2.GroupSpec) | | | | `created_at` | [int64](#int64) | | | @@ -6912,7 +9053,7 @@ A successful bid becomes a Lease(ID). - + ### OrderFilters OrderFilters defines flags for order list filter @@ -6931,7 +9072,7 @@ A successful bid becomes a Lease(ID). - + ### OrderID OrderID stores owner and all other seq numbers @@ -6951,7 +9092,7 @@ A successful bid becomes a Lease(ID). - + ### Order.State State is an enum which refers to state of order @@ -6972,14 +9113,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta3/params.proto + ## akash/market/v1beta2/params.proto - + ### Params Params is the params for the x/market module @@ -7004,14 +9145,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta2/bid.proto + ## akash/market/v1beta5/bid.proto - + ### Bid Bid stores BidID, state of bid and price @@ -7019,17 +9160,18 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | - | `state` | [Bid.State](#akash.market.v1beta2.Bid.State) | | | + | `bid_id` | [BidID](#akash.market.v1beta5.BidID) | | | + | `state` | [Bid.State](#akash.market.v1beta5.Bid.State) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `created_at` | [int64](#int64) | | | + | `resources_offer` | [ResourceOffer](#akash.market.v1beta5.ResourceOffer) | repeated | | - + ### BidFilters BidFilters defines flags for bid list filter @@ -7049,7 +9191,7 @@ A successful bid becomes a Lease(ID). - + ### BidID BidID stores owner and all other seq numbers @@ -7069,7 +9211,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCloseBid MsgCloseBid defines an SDK message for closing bid @@ -7077,14 +9219,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | + | `bid_id` | [BidID](#akash.market.v1beta5.BidID) | | | - + ### MsgCloseBidResponse MsgCloseBidResponse defines the Msg/CloseBid response type. @@ -7094,7 +9236,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCreateBid MsgCreateBid defines an SDK message for creating Bid @@ -7102,17 +9244,18 @@ A successful bid becomes a Lease(ID). 
| Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order` | [OrderID](#akash.market.v1beta2.OrderID) | | | + | `order` | [OrderID](#akash.market.v1beta5.OrderID) | | | | `provider` | [string](#string) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `deposit` | [cosmos.base.v1beta1.Coin](#cosmos.base.v1beta1.Coin) | | | + | `resources_offer` | [ResourceOffer](#akash.market.v1beta5.ResourceOffer) | repeated | | - + ### MsgCreateBidResponse MsgCreateBidResponse defines the Msg/CreateBid response type. @@ -7121,10 +9264,27 @@ A successful bid becomes a Lease(ID). + + + + ### ResourceOffer + ResourceOffer describes resources that provider is offering +for deployment + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `resources` | [akash.base.resources.v1.Resources](#akash.base.resources.v1.Resources) | | | + | `count` | [uint32](#uint32) | | | + + + + + - + ### Bid.State State is an enum which refers to state of bid @@ -7146,14 +9306,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta2/query.proto + ## akash/market/v1beta5/query.proto - + ### QueryBidRequest QueryBidRequest is request type for the Query/Bid RPC method @@ -7161,14 +9321,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [BidID](#akash.market.v1beta2.BidID) | | | + | `id` | [BidID](#akash.market.v1beta5.BidID) | | | - + ### QueryBidResponse QueryBidResponse is response type for the Query/Bid RPC method @@ -7176,15 +9336,15 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid` | [Bid](#akash.market.v1beta2.Bid) | | | - | `escrow_account` | [akash.escrow.v1beta2.Account](#akash.escrow.v1beta2.Account) | | | + | `bid` | [Bid](#akash.market.v1beta5.Bid) | | | + | `escrow_account` | [akash.escrow.v1beta3.Account](#akash.escrow.v1beta3.Account) | | | - + ### QueryBidsRequest QueryBidsRequest is request type for the Query/Bids RPC method @@ -7192,7 +9352,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [BidFilters](#akash.market.v1beta2.BidFilters) | | | + | `filters` | [BidFilters](#akash.market.v1beta5.BidFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -7200,7 +9360,7 @@ A successful bid becomes a Lease(ID). - + ### QueryBidsResponse QueryBidsResponse is response type for the Query/Bids RPC method @@ -7208,7 +9368,7 @@ A successful bid becomes a Lease(ID). 
| Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bids` | [QueryBidResponse](#akash.market.v1beta2.QueryBidResponse) | repeated | | + | `bids` | [QueryBidResponse](#akash.market.v1beta5.QueryBidResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -7216,7 +9376,7 @@ A successful bid becomes a Lease(ID). - + ### QueryLeaseRequest QueryLeaseRequest is request type for the Query/Lease RPC method @@ -7224,14 +9384,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | + | `id` | [LeaseID](#akash.market.v1beta5.LeaseID) | | | - + ### QueryLeaseResponse QueryLeaseResponse is response type for the Query/Lease RPC method @@ -7239,15 +9399,15 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease` | [Lease](#akash.market.v1beta2.Lease) | | | - | `escrow_payment` | [akash.escrow.v1beta2.FractionalPayment](#akash.escrow.v1beta2.FractionalPayment) | | | + | `lease` | [Lease](#akash.market.v1beta5.Lease) | | | + | `escrow_payment` | [akash.escrow.v1beta3.FractionalPayment](#akash.escrow.v1beta3.FractionalPayment) | | | - + ### QueryLeasesRequest QueryLeasesRequest is request type for the Query/Leases RPC method @@ -7255,7 +9415,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [LeaseFilters](#akash.market.v1beta2.LeaseFilters) | | | + | `filters` | [LeaseFilters](#akash.market.v1beta5.LeaseFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -7263,7 +9423,7 @@ A successful bid becomes a Lease(ID). 
- + ### QueryLeasesResponse QueryLeasesResponse is response type for the Query/Leases RPC method @@ -7271,7 +9431,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `leases` | [QueryLeaseResponse](#akash.market.v1beta2.QueryLeaseResponse) | repeated | | + | `leases` | [QueryLeaseResponse](#akash.market.v1beta5.QueryLeaseResponse) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -7279,7 +9439,7 @@ A successful bid becomes a Lease(ID). - + ### QueryOrderRequest QueryOrderRequest is request type for the Query/Order RPC method @@ -7287,14 +9447,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `id` | [OrderID](#akash.market.v1beta2.OrderID) | | | + | `id` | [OrderID](#akash.market.v1beta5.OrderID) | | | - + ### QueryOrderResponse QueryOrderResponse is response type for the Query/Order RPC method @@ -7302,14 +9462,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order` | [Order](#akash.market.v1beta2.Order) | | | + | `order` | [Order](#akash.market.v1beta5.Order) | | | - + ### QueryOrdersRequest QueryOrdersRequest is request type for the Query/Orders RPC method @@ -7317,7 +9477,7 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `filters` | [OrderFilters](#akash.market.v1beta2.OrderFilters) | | | + | `filters` | [OrderFilters](#akash.market.v1beta5.OrderFilters) | | | | `pagination` | [cosmos.base.query.v1beta1.PageRequest](#cosmos.base.query.v1beta1.PageRequest) | | | @@ -7325,7 +9485,7 @@ A successful bid becomes a Lease(ID). - + ### QueryOrdersResponse QueryOrdersResponse is response type for the Query/Orders RPC method @@ -7333,7 +9493,7 @@ A successful bid becomes a Lease(ID). 
| Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `orders` | [Order](#akash.market.v1beta2.Order) | repeated | | + | `orders` | [Order](#akash.market.v1beta5.Order) | repeated | | | `pagination` | [cosmos.base.query.v1beta1.PageResponse](#cosmos.base.query.v1beta1.PageResponse) | | | @@ -7347,28 +9507,28 @@ A successful bid becomes a Lease(ID). - + ### Query Query defines the gRPC querier service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `Orders` | [QueryOrdersRequest](#akash.market.v1beta2.QueryOrdersRequest) | [QueryOrdersResponse](#akash.market.v1beta2.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta2/orders/list| - | `Order` | [QueryOrderRequest](#akash.market.v1beta2.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta2.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta2/orders/info| - | `Bids` | [QueryBidsRequest](#akash.market.v1beta2.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta2.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta2/bids/list| - | `Bid` | [QueryBidRequest](#akash.market.v1beta2.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta2.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta2/bids/info| - | `Leases` | [QueryLeasesRequest](#akash.market.v1beta2.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta2.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta2/leases/list| - | `Lease` | [QueryLeaseRequest](#akash.market.v1beta2.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta2.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta2/leases/info| + | `Orders` | [QueryOrdersRequest](#akash.market.v1beta5.QueryOrdersRequest) | 
[QueryOrdersResponse](#akash.market.v1beta5.QueryOrdersResponse) | Orders queries orders with filters | GET|/akash/market/v1beta5/orders/list| + | `Order` | [QueryOrderRequest](#akash.market.v1beta5.QueryOrderRequest) | [QueryOrderResponse](#akash.market.v1beta5.QueryOrderResponse) | Order queries order details | GET|/akash/market/v1beta5/orders/info| + | `Bids` | [QueryBidsRequest](#akash.market.v1beta5.QueryBidsRequest) | [QueryBidsResponse](#akash.market.v1beta5.QueryBidsResponse) | Bids queries bids with filters | GET|/akash/market/v1beta5/bids/list| + | `Bid` | [QueryBidRequest](#akash.market.v1beta5.QueryBidRequest) | [QueryBidResponse](#akash.market.v1beta5.QueryBidResponse) | Bid queries bid details | GET|/akash/market/v1beta5/bids/info| + | `Leases` | [QueryLeasesRequest](#akash.market.v1beta5.QueryLeasesRequest) | [QueryLeasesResponse](#akash.market.v1beta5.QueryLeasesResponse) | Leases queries leases with filters | GET|/akash/market/v1beta5/leases/list| + | `Lease` | [QueryLeaseRequest](#akash.market.v1beta5.QueryLeaseRequest) | [QueryLeaseResponse](#akash.market.v1beta5.QueryLeaseResponse) | Lease queries lease details | GET|/akash/market/v1beta5/leases/info| - +

Top

- ## akash/market/v1beta2/service.proto + ## akash/market/v1beta5/service.proto @@ -7378,31 +9538,31 @@ A successful bid becomes a Lease(ID). - + ### Msg Msg defines the market Msg service | Method Name | Request Type | Response Type | Description | HTTP Verb | Endpoint | | ----------- | ------------ | ------------- | ------------| ------- | -------- | - | `CreateBid` | [MsgCreateBid](#akash.market.v1beta2.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta2.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | - | `CloseBid` | [MsgCloseBid](#akash.market.v1beta2.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta2.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. | | - | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta2.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta2.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | - | `CreateLease` | [MsgCreateLease](#akash.market.v1beta2.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta2.MsgCreateLeaseResponse) | CreateLease creates a new lease | | - | `CloseLease` | [MsgCloseLease](#akash.market.v1beta2.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta2.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | + | `CreateBid` | [MsgCreateBid](#akash.market.v1beta5.MsgCreateBid) | [MsgCreateBidResponse](#akash.market.v1beta5.MsgCreateBidResponse) | CreateBid defines a method to create a bid given proper inputs. | | + | `CloseBid` | [MsgCloseBid](#akash.market.v1beta5.MsgCloseBid) | [MsgCloseBidResponse](#akash.market.v1beta5.MsgCloseBidResponse) | CloseBid defines a method to close a bid given proper inputs. 
| | + | `WithdrawLease` | [MsgWithdrawLease](#akash.market.v1beta5.MsgWithdrawLease) | [MsgWithdrawLeaseResponse](#akash.market.v1beta5.MsgWithdrawLeaseResponse) | WithdrawLease withdraws accrued funds from the lease payment | | + | `CreateLease` | [MsgCreateLease](#akash.market.v1beta5.MsgCreateLease) | [MsgCreateLeaseResponse](#akash.market.v1beta5.MsgCreateLeaseResponse) | CreateLease creates a new lease | | + | `CloseLease` | [MsgCloseLease](#akash.market.v1beta5.MsgCloseLease) | [MsgCloseLeaseResponse](#akash.market.v1beta5.MsgCloseLeaseResponse) | CloseLease defines a method to close an order given proper inputs. | | - +

Top

- ## akash/market/v1beta2/lease.proto + ## akash/market/v1beta5/lease.proto - + ### Lease Lease stores LeaseID, state of lease and price @@ -7410,8 +9570,8 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | - | `state` | [Lease.State](#akash.market.v1beta2.Lease.State) | | | + | `lease_id` | [LeaseID](#akash.market.v1beta5.LeaseID) | | | + | `state` | [Lease.State](#akash.market.v1beta5.Lease.State) | | | | `price` | [cosmos.base.v1beta1.DecCoin](#cosmos.base.v1beta1.DecCoin) | | | | `created_at` | [int64](#int64) | | | | `closed_on` | [int64](#int64) | | | @@ -7421,7 +9581,7 @@ A successful bid becomes a Lease(ID). - + ### LeaseFilters LeaseFilters defines flags for lease list filter @@ -7441,7 +9601,7 @@ A successful bid becomes a Lease(ID). - + ### LeaseID LeaseID stores bid details of lease @@ -7460,7 +9620,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCloseLease MsgCloseLease defines an SDK message for closing order @@ -7468,14 +9628,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `lease_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | + | `lease_id` | [LeaseID](#akash.market.v1beta5.LeaseID) | | | - + ### MsgCloseLeaseResponse MsgCloseLeaseResponse defines the Msg/CloseLease response type. @@ -7485,7 +9645,7 @@ A successful bid becomes a Lease(ID). - + ### MsgCreateLease MsgCreateLease is sent to create a lease @@ -7493,14 +9653,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [BidID](#akash.market.v1beta2.BidID) | | | + | `bid_id` | [BidID](#akash.market.v1beta5.BidID) | | | - + ### MsgCreateLeaseResponse MsgCreateLeaseResponse is the response from creating a lease @@ -7510,7 +9670,7 @@ A successful bid becomes a Lease(ID). 
- + ### MsgWithdrawLease MsgWithdrawLease defines an SDK message for closing bid @@ -7518,14 +9678,14 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `bid_id` | [LeaseID](#akash.market.v1beta2.LeaseID) | | | + | `bid_id` | [LeaseID](#akash.market.v1beta5.LeaseID) | | | - + ### MsgWithdrawLeaseResponse MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. @@ -7537,7 +9697,7 @@ A successful bid becomes a Lease(ID). - + ### Lease.State State is an enum which refers to state of lease @@ -7558,14 +9718,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta2/genesis.proto + ## akash/market/v1beta5/genesis.proto - + ### GenesisState GenesisState defines the basic genesis state used by market module @@ -7573,9 +9733,10 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `orders` | [Order](#akash.market.v1beta2.Order) | repeated | | - | `leases` | [Lease](#akash.market.v1beta2.Lease) | repeated | | - | `params` | [Params](#akash.market.v1beta2.Params) | | | + | `params` | [Params](#akash.market.v1beta5.Params) | | | + | `orders` | [Order](#akash.market.v1beta5.Order) | repeated | | + | `leases` | [Lease](#akash.market.v1beta5.Lease) | repeated | | + | `bids` | [Bid](#akash.market.v1beta5.Bid) | repeated | | @@ -7591,14 +9752,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta2/order.proto + ## akash/market/v1beta5/order.proto - + ### Order Order stores orderID, state of order and other details @@ -7606,9 +9767,9 @@ A successful bid becomes a Lease(ID). | Field | Type | Label | Description | | ----- | ---- | ----- | ----------- | - | `order_id` | [OrderID](#akash.market.v1beta2.OrderID) | | | - | `state` | [Order.State](#akash.market.v1beta2.Order.State) | | | - | `spec` | [akash.deployment.v1beta2.GroupSpec](#akash.deployment.v1beta2.GroupSpec) | | | + | `order_id` | [OrderID](#akash.market.v1beta5.OrderID) | | | + | `state` | [Order.State](#akash.market.v1beta5.Order.State) | | | + | `spec` | [akash.deployment.v1beta4.GroupSpec](#akash.deployment.v1beta4.GroupSpec) | | | | `created_at` | [int64](#int64) | | | @@ -7616,7 +9777,7 @@ A successful bid becomes a Lease(ID). - + ### OrderFilters OrderFilters defines flags for order list filter @@ -7635,7 +9796,7 @@ A successful bid becomes a Lease(ID). - + ### OrderID OrderID stores owner and all other seq numbers @@ -7655,7 +9816,7 @@ A successful bid becomes a Lease(ID). - + ### Order.State State is an enum which refers to state of order @@ -7676,14 +9837,14 @@ A successful bid becomes a Lease(ID). - +

Top

- ## akash/market/v1beta2/params.proto + ## akash/market/v1beta5/params.proto - + ### Params Params is the params for the x/market module diff --git a/docs/proto/provider.md b/docs/proto/provider.md index 853de01c..076b318f 100644 --- a/docs/proto/provider.md +++ b/docs/proto/provider.md @@ -33,6 +33,21 @@ - [ServiceParams](#akash.manifest.v2beta2.ServiceParams) - [StorageParams](#akash.manifest.v2beta2.StorageParams) + - [akash/manifest/v2beta3/group.proto](#akash/manifest/v2beta3/group.proto) + - [Group](#akash.manifest.v2beta3.Group) + + - [akash/manifest/v2beta3/httpoptions.proto](#akash/manifest/v2beta3/httpoptions.proto) + - [ServiceExposeHTTPOptions](#akash.manifest.v2beta3.ServiceExposeHTTPOptions) + + - [akash/manifest/v2beta3/serviceexpose.proto](#akash/manifest/v2beta3/serviceexpose.proto) + - [ServiceExpose](#akash.manifest.v2beta3.ServiceExpose) + + - [akash/manifest/v2beta3/service.proto](#akash/manifest/v2beta3/service.proto) + - [ImageCredentials](#akash.manifest.v2beta3.ImageCredentials) + - [Service](#akash.manifest.v2beta3.Service) + - [ServiceParams](#akash.manifest.v2beta3.ServiceParams) + - [StorageParams](#akash.manifest.v2beta3.StorageParams) + - [akash/provider/v1/status.proto](#akash/provider/v1/status.proto) - [BidEngineStatus](#akash.provider.v1.BidEngineStatus) - [ClusterStatus](#akash.provider.v1.ClusterStatus) @@ -450,6 +465,203 @@ + + + + + + + + + + + +

Top

+ + ## akash/manifest/v2beta3/group.proto + + + + + + ### Group + Group store name and list of services + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `name` | [string](#string) | | | + | `services` | [Service](#akash.manifest.v2beta3.Service) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/manifest/v2beta3/httpoptions.proto + + + + + + ### ServiceExposeHTTPOptions + ServiceExposeHTTPOptions + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `max_body_size` | [uint32](#uint32) | | | + | `read_timeout` | [uint32](#uint32) | | | + | `send_timeout` | [uint32](#uint32) | | | + | `next_tries` | [uint32](#uint32) | | | + | `next_timeout` | [uint32](#uint32) | | | + | `next_cases` | [string](#string) | repeated | | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/manifest/v2beta3/serviceexpose.proto + + + + + + ### ServiceExpose + ServiceExpose stores exposed ports and hosts details + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `port` | [uint32](#uint32) | | port on the container | + | `external_port` | [uint32](#uint32) | | port on the service definition | + | `proto` | [string](#string) | | | + | `service` | [string](#string) | | | + | `global` | [bool](#bool) | | | + | `hosts` | [string](#string) | repeated | | + | `http_options` | [ServiceExposeHTTPOptions](#akash.manifest.v2beta3.ServiceExposeHTTPOptions) | | | + | `ip` | [string](#string) | | The name of the IP address associated with this, if any | + | `endpoint_sequence_number` | [uint32](#uint32) | | The sequence number of the associated endpoint in the on-chain data | + + + + + + + + + + + + + + + + +

Top

+ + ## akash/manifest/v2beta3/service.proto + + + + + + ### ImageCredentials + Credentials to fetch image from registry + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `host` | [string](#string) | | | + | `email` | [string](#string) | | | + | `username` | [string](#string) | | | + | `password` | [string](#string) | | | + + + + + + + + + ### Service + Service stores name, image, args, env, unit, count and expose list of service + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `name` | [string](#string) | | | + | `image` | [string](#string) | | | + | `command` | [string](#string) | repeated | | + | `args` | [string](#string) | repeated | | + | `env` | [string](#string) | repeated | | + | `resources` | [akash.base.resources.v1.Resources](#akash.base.resources.v1.Resources) | | | + | `count` | [uint32](#uint32) | | | + | `expose` | [ServiceExpose](#akash.manifest.v2beta3.ServiceExpose) | repeated | | + | `params` | [ServiceParams](#akash.manifest.v2beta3.ServiceParams) | | | + + + + + + + + + ### ServiceParams + ServiceParams + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `storage` | [StorageParams](#akash.manifest.v2beta3.StorageParams) | repeated | | + | `credentials` | [ImageCredentials](#akash.manifest.v2beta3.ImageCredentials) | | | + + + + + + + + + ### StorageParams + StorageParams + + + | Field | Type | Label | Description | + | ----- | ---- | ----- | ----------- | + | `name` | [string](#string) | | | + | `mount` | [string](#string) | | | + | `read_only` | [bool](#bool) | | | + + + + + diff --git a/go/manifest/v2beta3/errors.go b/go/manifest/v2beta3/errors.go new file mode 100644 index 00000000..1ac41fc9 --- /dev/null +++ b/go/manifest/v2beta3/errors.go @@ -0,0 +1,10 @@ +package v2beta3 + +import ( + "errors" +) + +var ( + ErrInvalidManifest = errors.New("invalid manifest") + ErrManifestCrossValidation = errors.New("manifest 
cross-validation error") +) diff --git a/go/manifest/v2beta3/group.go b/go/manifest/v2beta3/group.go new file mode 100644 index 00000000..543102a8 --- /dev/null +++ b/go/manifest/v2beta3/group.go @@ -0,0 +1,79 @@ +package v2beta3 + +import ( + "fmt" + "sort" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" +) + +var _ dtypes.ResourceGroup = (*Group)(nil) + +// GetName returns the name of group +func (g Group) GetName() string { + return g.Name +} + +func (g Group) GetResourceUnits() dtypes.ResourceUnits { + groups := make(map[uint32]*dtypes.ResourceUnit) + + for _, svc := range g.Services { + if _, exists := groups[svc.Resources.ID]; !exists { + groups[svc.Resources.ID] = &dtypes.ResourceUnit{ + Resources: svc.Resources, + Count: svc.Count, + } + } else { + groups[svc.Resources.ID].Count += svc.Count + } + } + + units := make(dtypes.ResourceUnits, 0, len(groups)) + + for i := range groups { + units = append(units, *groups[i]) + } + + return units +} + +func (g Group) AllHostnames() []string { + allHostnames := make([]string, 0) + for _, service := range g.Services { + for _, expose := range service.Expose { + allHostnames = append(allHostnames, expose.Hosts...) 
+ } + } + + return allHostnames +} + +func (g *Group) Validate(helper *validateManifestGroupsHelper) error { + if 0 == len(g.Services) { + return fmt.Errorf("%w: group %q contains no services", ErrInvalidManifest, g.GetName()) + } + + if !sort.IsSorted(g.Services) { + return fmt.Errorf("%w: group %q services is not sorted", ErrInvalidManifest, g.GetName()) + } + + for _, s := range g.Services { + if err := s.validate(helper); err != nil { + return err + } + } + + return nil +} + +// checkAgainstGSpec check if manifest group is within GroupSpec resources +// NOTE: it modifies caller's gspec +func (g *Group) checkAgainstGSpec(gspec *groupSpec) error { + for _, svc := range g.Services { + if err := svc.checkAgainstGSpec(gspec); err != nil { + return fmt.Errorf("%w: group %q: %w", ErrManifestCrossValidation, g.Name, err) + } + } + + return nil +} diff --git a/go/manifest/v2beta3/group.pb.go b/go/manifest/v2beta3/group.pb.go new file mode 100644 index 00000000..b78b6e33 --- /dev/null +++ b/go/manifest/v2beta3/group.pb.go @@ -0,0 +1,399 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/manifest/v2beta3/group.proto + +package v2beta3 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Group store name and list of services +type Group struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Services Services `protobuf:"bytes,2,rep,name=services,proto3,castrepeated=Services" json:"services" yaml:"services"` +} + +func (m *Group) Reset() { *m = Group{} } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_d7cd4686cd5336b6, []int{0} +} +func (m *Group) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Group.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return m.Size() +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Group)(nil), "akash.manifest.v2beta3.Group") +} + +func init() { + proto.RegisterFile("akash/manifest/v2beta3/group.proto", fileDescriptor_d7cd4686cd5336b6) +} + +var fileDescriptor_d7cd4686cd5336b6 = []byte{ + // 276 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0xcf, 0x4d, 0xcc, 0xcb, 0x4c, 0x4b, 0x2d, 0x2e, 0xd1, 0x2f, 0x33, 0x4a, 0x4a, 0x2d, + 0x49, 0x34, 0xd6, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xd0, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, + 0x03, 0xab, 0xd1, 0x83, 0xa9, 0xd1, 0x83, 0xaa, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0x2b, + 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x70, 0x98, 0x58, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, + 0x0a, 0x51, 0xa5, 0xb4, 0x82, 
0x91, 0x8b, 0xd5, 0x1d, 0x64, 0x87, 0x90, 0x36, 0x17, 0x4b, 0x5e, + 0x62, 0x6e, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, 0xf8, 0xab, 0x7b, 0xf2, 0x60, 0xfe, + 0xa7, 0x7b, 0xf2, 0xdc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, 0x20, 0x9e, 0x52, 0x10, 0x58, 0x50, + 0x28, 0x87, 0x8b, 0x03, 0x6a, 0x4e, 0xb1, 0x04, 0x93, 0x02, 0xb3, 0x06, 0xb7, 0x91, 0xbc, 0x1e, + 0x76, 0xd7, 0xe9, 0x05, 0x43, 0xd4, 0x39, 0xe9, 0x9f, 0xb8, 0x27, 0xcf, 0xf0, 0xea, 0x9e, 0x3c, + 0x5c, 0xe3, 0xa7, 0x7b, 0xf2, 0xfc, 0x10, 0x93, 0x61, 0x22, 0x4a, 0xab, 0xee, 0xcb, 0x73, 0x40, + 0xd5, 0x17, 0x07, 0xc1, 0x15, 0x5a, 0xb1, 0x74, 0x2c, 0x90, 0x67, 0x70, 0x8a, 0xb8, 0xf1, 0x50, + 0x8e, 0xa1, 0xe1, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, + 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, + 0x99, 0xa5, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, 0xe7, 0xe7, 0xea, 0x83, 0x5d, 0xa3, 0x9b, + 0x97, 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, 0x16, 0x64, 0xea, 0xa7, 0xe7, 0x63, 0x04, + 0x49, 0x12, 0x1b, 0x38, 0x2c, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xfd, 0xb6, 0xb3, 0x7b, + 0x85, 0x01, 0x00, 0x00, +} + +func (m *Group) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Group) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Services) > 0 { + for iNdEx := len(m.Services) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Services[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = 
encodeVarintGroup(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { + offset -= sovGroup(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Group) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGroup(uint64(l)) + } + if len(m.Services) > 0 { + for _, e := range m.Services { + l = e.Size() + n += 1 + l + sovGroup(uint64(l)) + } + } + return n +} + +func sovGroup(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroup(x uint64) (n int) { + return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Group) String() string { + if this == nil { + return "nil" + } + repeatedStringForServices := "[]Service{" + for _, f := range this.Services { + repeatedStringForServices += fmt.Sprintf("%v", f) + "," + } + repeatedStringForServices += "}" + s := strings.Join([]string{`&Group{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Services:` + repeatedStringForServices + `,`, + `}`, + }, "") + return s +} +func valueToStringGroup(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Group) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Group: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Group: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Services", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Services = append(m.Services, Service{}) + if err := m.Services[len(m.Services)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroup(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 
{ + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroup + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroup + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/manifest/v2beta3/groups.go b/go/manifest/v2beta3/groups.go new file mode 100644 index 00000000..1572fe09 --- /dev/null +++ b/go/manifest/v2beta3/groups.go @@ -0,0 +1,88 @@ +package v2beta3 + +import ( + "fmt" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" +) + +type Groups []Group + +func (groups Groups) Validate() error { + helper := validateManifestGroupsHelper{ + hostnames: make(map[string]int), + } + + names := make(map[string]int) // used as a set + + for _, group := range groups { + if err := group.Validate(&helper); err != nil { + return 
err + } + if _, exists := names[group.GetName()]; exists { + return fmt.Errorf("%w: duplicate group %q", ErrInvalidManifest, group.GetName()) + } + + names[group.GetName()] = 0 // Value stored is not used + } + + if helper.globalServiceCount == 0 { + return fmt.Errorf("%w: zero global services", ErrInvalidManifest) + } + + return nil +} + +func (groups Groups) CheckAgainstGSpecs(gspecs dtypes.GroupSpecs) error { + gspecs = gspecs.Dup() + + if err := groups.Validate(); err != nil { + return err + } + + if len(groups) != len(gspecs) { + return fmt.Errorf("invalid manifest: group count mismatch (%v != %v)", len(groups), len(gspecs)) + } + + dgroupByName := newGroupSpecsHelper(gspecs) + + for _, mgroup := range groups { + dgroup, dgroupExists := dgroupByName[mgroup.GetName()] + + if !dgroupExists { + return fmt.Errorf("invalid manifest: unknown deployment group ('%v')", mgroup.GetName()) + } + + if err := mgroup.checkAgainstGSpec(dgroup); err != nil { + return err + } + } + + for _, gspec := range dgroupByName { + for resID, eps := range gspec.endpoints { + if eps.httpEndpoints > 0 { + return fmt.Errorf("%w: group %q: resources ID (%d): under-utilized (%d) HTTP endpoints", + ErrManifestCrossValidation, gspec.gs.Name, resID, eps.httpEndpoints) + } + + if eps.portEndpoints > 0 { + return fmt.Errorf("%w: group %q: resources ID (%d): under-utilized (%d) PORT endpoints", + ErrManifestCrossValidation, gspec.gs.Name, resID, eps.portEndpoints) + } + + if eps.ipEndpoints > 0 { + return fmt.Errorf("%w: group %q: resources ID (%d): under-utilized (%d) IP endpoints", + ErrManifestCrossValidation, gspec.gs.Name, resID, eps.ipEndpoints) + } + } + + for _, gRes := range gspec.gs.Resources { + if gRes.Count > 0 { + return fmt.Errorf("%w: group %q: resources ID (%d): under-utilized (%d) resources", + ErrManifestCrossValidation, gspec.gs.GetName(), gRes.ID, gRes.Count) + } + } + } + + return nil +} diff --git a/go/manifest/v2beta3/helpers.go b/go/manifest/v2beta3/helpers.go new file 
mode 100644 index 00000000..d5acd63c --- /dev/null +++ b/go/manifest/v2beta3/helpers.go @@ -0,0 +1,98 @@ +package v2beta3 + +import ( + k8svalidation "k8s.io/apimachinery/pkg/util/validation" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + resources "github.com/akash-network/akash-api/go/node/types/resources/v1" +) + +type validateManifestGroupsHelper struct { + hostnames map[string]int // used as a set + globalServiceCount uint +} + +type validateEndpoints struct { + httpEndpoints uint + portEndpoints uint + ipEndpoints uint +} + +type validateEndpointsHelper map[uint32]*validateEndpoints + +type groupSpec struct { + gs dtypes.GroupSpec + endpoints validateEndpointsHelper +} + +type groupSpecHelper map[string]*groupSpec + +func newGroupSpecsHelper(gspecs dtypes.GroupSpecs) groupSpecHelper { + res := make(groupSpecHelper) + + for _, gspec := range gspecs { + res[gspec.GetName()] = newGroupSpecHelper(*gspec) + } + + return res +} + +func newGroupSpecHelper(gs dtypes.GroupSpec) *groupSpec { + res := &groupSpec{ + gs: gs, + endpoints: make(validateEndpointsHelper), + } + + for _, gRes := range gs.Resources { + vep := &validateEndpoints{} + + for _, ep := range gRes.Endpoints { + switch ep.Kind { + case resources.Endpoint_SHARED_HTTP: + vep.httpEndpoints++ + case resources.Endpoint_RANDOM_PORT: + vep.portEndpoints++ + case resources.Endpoint_LEASED_IP: + vep.ipEndpoints++ + } + } + + res.endpoints[gRes.ID] = vep + } + + return res +} + +func isValidHostname(hostname string) bool { + return len(hostname) <= hostnameMaxLen && 0 == len(k8svalidation.IsDNS1123Subdomain(hostname)) +} + +func (ve *validateEndpoints) tryDecHTTP() bool { + if ve.httpEndpoints == 0 { + return false + } + + ve.httpEndpoints-- + + return true +} + +func (ve *validateEndpoints) tryDecPort() bool { + if ve.portEndpoints == 0 { + return false + } + + ve.portEndpoints-- + + return true +} + +func (ve *validateEndpoints) tryDecIP() bool { + if ve.ipEndpoints == 0 { + 
return false + } + + ve.ipEndpoints-- + + return true +} diff --git a/go/manifest/v2beta3/httpoptions.pb.go b/go/manifest/v2beta3/httpoptions.pb.go new file mode 100644 index 00000000..dd66334c --- /dev/null +++ b/go/manifest/v2beta3/httpoptions.pb.go @@ -0,0 +1,536 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/manifest/v2beta3/httpoptions.proto + +package v2beta3 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ServiceExposeHTTPOptions +type ServiceExposeHTTPOptions struct { + MaxBodySize uint32 `protobuf:"varint,1,opt,name=max_body_size,json=maxBodySize,proto3" json:"maxBodySize" yaml:"maxBodySize"` + ReadTimeout uint32 `protobuf:"varint,2,opt,name=read_timeout,json=readTimeout,proto3" json:"readTimeout" yaml:"readTimeout"` + SendTimeout uint32 `protobuf:"varint,3,opt,name=send_timeout,json=sendTimeout,proto3" json:"sendTimeout" yaml:"sendTimeout"` + NextTries uint32 `protobuf:"varint,4,opt,name=next_tries,json=nextTries,proto3" json:"nextTries" yaml:"nextTries"` + NextTimeout uint32 `protobuf:"varint,5,opt,name=next_timeout,json=nextTimeout,proto3" json:"nextTimeout" yaml:"nextTimeout"` + NextCases []string `protobuf:"bytes,6,rep,name=next_cases,json=nextCases,proto3" json:"nextCases,omitempty" yaml:"nextCases,omitempty"` +} + +func (m *ServiceExposeHTTPOptions) Reset() { *m = ServiceExposeHTTPOptions{} } +func 
(*ServiceExposeHTTPOptions) ProtoMessage() {} +func (*ServiceExposeHTTPOptions) Descriptor() ([]byte, []int) { + return fileDescriptor_ee317fccaba20357, []int{0} +} +func (m *ServiceExposeHTTPOptions) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceExposeHTTPOptions) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceExposeHTTPOptions.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceExposeHTTPOptions) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceExposeHTTPOptions.Merge(m, src) +} +func (m *ServiceExposeHTTPOptions) XXX_Size() int { + return m.Size() +} +func (m *ServiceExposeHTTPOptions) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceExposeHTTPOptions.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceExposeHTTPOptions proto.InternalMessageInfo + +func (m *ServiceExposeHTTPOptions) GetMaxBodySize() uint32 { + if m != nil { + return m.MaxBodySize + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetReadTimeout() uint32 { + if m != nil { + return m.ReadTimeout + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetSendTimeout() uint32 { + if m != nil { + return m.SendTimeout + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetNextTries() uint32 { + if m != nil { + return m.NextTries + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetNextTimeout() uint32 { + if m != nil { + return m.NextTimeout + } + return 0 +} + +func (m *ServiceExposeHTTPOptions) GetNextCases() []string { + if m != nil { + return m.NextCases + } + return nil +} + +func init() { + proto.RegisterType((*ServiceExposeHTTPOptions)(nil), "akash.manifest.v2beta3.ServiceExposeHTTPOptions") +} + +func init() { + proto.RegisterFile("akash/manifest/v2beta3/httpoptions.proto", fileDescriptor_ee317fccaba20357) +} + +var fileDescriptor_ee317fccaba20357 = 
[]byte{ + // 397 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x92, 0x31, 0xcb, 0xd3, 0x40, + 0x1c, 0x87, 0x13, 0xab, 0x85, 0x46, 0x0b, 0x12, 0x45, 0x42, 0x87, 0x4b, 0x0d, 0x08, 0x1d, 0x34, + 0x01, 0x0b, 0x82, 0x4e, 0x12, 0x11, 0xea, 0xa4, 0xa4, 0x19, 0x8a, 0x4b, 0xb8, 0xb4, 0x67, 0x7a, + 0xd4, 0xcb, 0x85, 0xdc, 0xb5, 0x26, 0x9d, 0xfc, 0x08, 0x7e, 0xac, 0x8e, 0x1d, 0x3b, 0xc5, 0xf7, + 0x4d, 0xb7, 0x8e, 0xfd, 0x04, 0x2f, 0x97, 0xe4, 0x7d, 0x73, 0xb4, 0x5b, 0xf2, 0xfc, 0x1f, 0x1e, + 0x7e, 0xc3, 0x69, 0x23, 0xb8, 0x82, 0x6c, 0xe9, 0x10, 0x18, 0xe3, 0x5f, 0x88, 0x71, 0x67, 0xf3, + 0x3e, 0x44, 0x1c, 0x8e, 0x9d, 0x25, 0xe7, 0x09, 0x4d, 0x38, 0xa6, 0x31, 0xb3, 0x93, 0x94, 0x72, + 0xaa, 0xbf, 0xaa, 0x4c, 0xfb, 0xde, 0xb4, 0x1b, 0x73, 0xf0, 0x32, 0xa2, 0x11, 0xad, 0x14, 0x47, + 0x7c, 0xd5, 0xb6, 0xf5, 0xbf, 0xa3, 0x19, 0x53, 0x94, 0x6e, 0xf0, 0x1c, 0x7d, 0xcd, 0x12, 0xca, + 0xd0, 0xc4, 0xf7, 0x7f, 0x7c, 0xaf, 0x83, 0xfa, 0x37, 0xad, 0x4f, 0x60, 0x16, 0x84, 0x74, 0x91, + 0x07, 0x0c, 0x6f, 0x91, 0xa1, 0x0e, 0xd5, 0x51, 0xdf, 0x7d, 0x73, 0x2a, 0xcc, 0xa7, 0x04, 0x66, + 0x2e, 0x5d, 0xe4, 0x53, 0xbc, 0x45, 0xe7, 0xc2, 0xd4, 0x73, 0x48, 0x7e, 0x7f, 0xb2, 0x24, 0x68, + 0x79, 0xb2, 0xa2, 0x4f, 0xb4, 0x67, 0x29, 0x82, 0x8b, 0x80, 0x63, 0x82, 0xe8, 0x9a, 0x1b, 0x8f, + 0xda, 0x92, 0xe0, 0x7e, 0x8d, 0xdb, 0x92, 0x04, 0x2d, 0x4f, 0x56, 0x44, 0x89, 0xa1, 0xb8, 0x2d, + 0x75, 0xda, 0x92, 0xe0, 0x57, 0x25, 0x09, 0x5a, 0x9e, 0xac, 0xe8, 0x9f, 0x35, 0x2d, 0x46, 0x19, + 0x0f, 0x78, 0x8a, 0x11, 0x33, 0x1e, 0x57, 0x9d, 0xd7, 0xa7, 0xc2, 0xec, 0x09, 0xea, 0x0b, 0x78, + 0x2e, 0xcc, 0xe7, 0x75, 0xe5, 0x01, 0x59, 0x5e, 0x7b, 0x16, 0x5b, 0xea, 0x42, 0xb3, 0xe5, 0x49, + 0xbb, 0xa5, 0x92, 0x2e, 0xb7, 0x48, 0xd0, 0xf2, 0x64, 0x45, 0x9f, 0x35, 0x5b, 0xe6, 0x90, 0x21, + 0x66, 0x74, 0x87, 0x9d, 0x51, 0xcf, 0xfd, 0xb8, 0x2b, 0x4c, 0xf5, 0x54, 0x98, 0x2f, 0xc4, 0xe5, + 0x8b, 0x38, 0xbc, 0xa5, 0x04, 0x73, 0x44, 0x12, 0x9e, 0x9f, 0x0b, 0x73, 0xd0, 
0x36, 0x2f, 0x8e, + 0xcd, 0xc6, 0x8a, 0xba, 0xb3, 0xc3, 0x2d, 0x50, 0xfe, 0x96, 0x40, 0xdd, 0x95, 0x40, 0xdd, 0x97, + 0x40, 0xbd, 0x29, 0x81, 0xfa, 0xef, 0x08, 0x94, 0xfd, 0x11, 0x28, 0x87, 0x23, 0x50, 0x7e, 0x7e, + 0x88, 0x30, 0x5f, 0xae, 0x43, 0x7b, 0x4e, 0x89, 0x53, 0x3d, 0x9e, 0x77, 0x31, 0xe2, 0x7f, 0x68, + 0xba, 0x6a, 0xfe, 0x60, 0x82, 0x9d, 0x88, 0x5e, 0xbd, 0xbd, 0xb0, 0x5b, 0x3d, 0xa1, 0xf1, 0x5d, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xb7, 0xe0, 0x99, 0x5a, 0x9c, 0x02, 0x00, 0x00, +} + +func (m *ServiceExposeHTTPOptions) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceExposeHTTPOptions) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceExposeHTTPOptions) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.NextCases) > 0 { + for iNdEx := len(m.NextCases) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.NextCases[iNdEx]) + copy(dAtA[i:], m.NextCases[iNdEx]) + i = encodeVarintHttpoptions(dAtA, i, uint64(len(m.NextCases[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.NextTimeout != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTimeout)) + i-- + dAtA[i] = 0x28 + } + if m.NextTries != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.NextTries)) + i-- + dAtA[i] = 0x20 + } + if m.SendTimeout != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.SendTimeout)) + i-- + dAtA[i] = 0x18 + } + if m.ReadTimeout != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.ReadTimeout)) + i-- + dAtA[i] = 0x10 + } + if m.MaxBodySize != 0 { + i = encodeVarintHttpoptions(dAtA, i, uint64(m.MaxBodySize)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintHttpoptions(dAtA []byte, offset int, v uint64) int { + offset -= sovHttpoptions(v) + base := offset + for v >= 1<<7 
{ + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ServiceExposeHTTPOptions) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.MaxBodySize != 0 { + n += 1 + sovHttpoptions(uint64(m.MaxBodySize)) + } + if m.ReadTimeout != 0 { + n += 1 + sovHttpoptions(uint64(m.ReadTimeout)) + } + if m.SendTimeout != 0 { + n += 1 + sovHttpoptions(uint64(m.SendTimeout)) + } + if m.NextTries != 0 { + n += 1 + sovHttpoptions(uint64(m.NextTries)) + } + if m.NextTimeout != 0 { + n += 1 + sovHttpoptions(uint64(m.NextTimeout)) + } + if len(m.NextCases) > 0 { + for _, s := range m.NextCases { + l = len(s) + n += 1 + l + sovHttpoptions(uint64(l)) + } + } + return n +} + +func sovHttpoptions(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozHttpoptions(x uint64) (n int) { + return sovHttpoptions(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ServiceExposeHTTPOptions) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceExposeHTTPOptions{`, + `MaxBodySize:` + fmt.Sprintf("%v", this.MaxBodySize) + `,`, + `ReadTimeout:` + fmt.Sprintf("%v", this.ReadTimeout) + `,`, + `SendTimeout:` + fmt.Sprintf("%v", this.SendTimeout) + `,`, + `NextTries:` + fmt.Sprintf("%v", this.NextTries) + `,`, + `NextTimeout:` + fmt.Sprintf("%v", this.NextTimeout) + `,`, + `NextCases:` + fmt.Sprintf("%v", this.NextCases) + `,`, + `}`, + }, "") + return s +} +func valueToStringHttpoptions(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ServiceExposeHTTPOptions) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceExposeHTTPOptions: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceExposeHTTPOptions: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field MaxBodySize", wireType) + } + m.MaxBodySize = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.MaxBodySize |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadTimeout", wireType) + } + m.ReadTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ReadTimeout |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SendTimeout", wireType) + } + m.SendTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SendTimeout |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NextTries", wireType) + } + m.NextTries = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextTries |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field NextTimeout", wireType) + } + m.NextTimeout = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NextTimeout |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field NextCases", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthHttpoptions + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthHttpoptions + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.NextCases = append(m.NextCases, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipHttpoptions(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthHttpoptions + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipHttpoptions(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowHttpoptions + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthHttpoptions + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupHttpoptions + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthHttpoptions + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthHttpoptions = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowHttpoptions = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupHttpoptions = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/manifest/v2beta3/manifest.go b/go/manifest/v2beta3/manifest.go new file mode 100644 index 00000000..8c03c174 --- /dev/null +++ b/go/manifest/v2beta3/manifest.go @@ -0,0 +1,67 @@ +package v2beta3 + +import ( + "crypto/sha256" + "encoding/json" + "fmt" + "regexp" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" +) + +var ( + serviceNameValidationRegex = regexp.MustCompile(`^[a-z]([-a-z0-9]*[a-z0-9])?$`) + hostnameMaxLen = 255 +) + +// Manifest store list of groups +type Manifest Groups + +// GetGroups returns a manifest with groups list +func (m Manifest) GetGroups() Groups { + return Groups(m) +} + +// Validate does validation for manifest +func (m Manifest) Validate() error { + if len(m) == 0 { + return fmt.Errorf("%w: manifest is empty", ErrInvalidManifest) + } + + return m.GetGroups().Validate() +} + +func (m Manifest) 
CheckAgainstDeployment(dgroups []dtypes.Group) error { + gspecs := make([]*dtypes.GroupSpec, 0, len(dgroups)) + + for _, dgroup := range dgroups { + gspec := dgroup.GroupSpec + gspecs = append(gspecs, &gspec) + } + + return m.CheckAgainstGSpecs(gspecs) +} + +func (m Manifest) CheckAgainstGSpecs(gspecs dtypes.GroupSpecs) error { + return m.GetGroups().CheckAgainstGSpecs(gspecs) +} + +// Version calculates the identifying deterministic hash for an SDL. +// Sha256 returns 32 byte sum of the SDL. +func (m Manifest) Version() ([]byte, error) { + data, err := json.Marshal(m) + if err != nil { + return nil, err + } + + sortedBytes, err := sdk.SortJSON(data) + if err != nil { + return nil, err + } + + sum := sha256.Sum256(sortedBytes) + + return sum[:], nil +} diff --git a/go/manifest/v2beta3/manifest_cross_validation_test.go b/go/manifest/v2beta3/manifest_cross_validation_test.go new file mode 100644 index 00000000..be975179 --- /dev/null +++ b/go/manifest/v2beta3/manifest_cross_validation_test.go @@ -0,0 +1,196 @@ +package v2beta3 + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + attr "github.com/akash-network/akash-api/go/node/types/attributes/v1" + + "github.com/akash-network/akash-api/go/node/client/testutil/v1beta3" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + tutil "github.com/akash-network/akash-api/go/testutil" +) + +const ( + validationPrefix = `^manifest cross-validation error: ` + groupPrefix = validationPrefix + `group ".+": ` + resourcesIDPrefix = groupPrefix + `resources ID \(\d+\): ` + servicePrefix = groupPrefix + `service ".+": ` + resourcesMismatchRegex = servicePrefix + `CPU|GPU|Memory|Storage resources mismatch for ID \d+$` + overUtilizedGroup = servicePrefix + `over-utilized replicas \(\d+\) > group spec resources count \(\d+\)$` + overUtilizedEndpoints = servicePrefix + `resources ID \(\d+\): over-utilized HTTP|PORT|IP endpoints$` + 
underUtilizedGroupResources = resourcesIDPrefix + `under-utilized \(\d+\) resources` + underUtilizedGroupEndpoints = resourcesIDPrefix + `under-utilized \(\d+\) HTTP|PORT|IP endpoints` +) + +func TestManifestWithEmptyDeployment(t *testing.T) { + m := simpleManifest(1) + deployment := make([]dtypes.Group, 0) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) +} + +func simpleDeployment(t *testing.T, expose ServiceExposes, count uint32) []dtypes.Group { + deployment := make([]dtypes.Group, 1) + gid := testutil.GroupID(t) + resources := make(dtypes.ResourceUnits, 1) + resources[0] = dtypes.ResourceUnit{ + Resources: simpleResources(expose), + Count: count, + Price: sdk.NewInt64DecCoin(tutil.CoinDenom, 1), + } + deployment[0] = dtypes.Group{ + GroupID: gid, + State: 0, + GroupSpec: dtypes.GroupSpec{ + Name: nameOfTestGroup, + Requirements: attr.PlacementRequirements{}, + Resources: resources, + }, + } + + return deployment +} + +func TestManifestWithDeployment(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + err := m.CheckAgainstDeployment(deployment) + require.NoError(t, err) +} + +func TestManifestWithDeploymentMultipleCount(t *testing.T) { + addl := uint32(tutil.RandRangeInt(1, 20)) + m := simpleManifest(addl) + + deployment := simpleDeployment(t, m[0].Services[0].Expose, addl) + + err := m.CheckAgainstDeployment(deployment) + require.NoError(t, err) +} + +func TestManifestWithDeploymentMultiple(t *testing.T) { + cpu := int64(tutil.RandRangeInt(1024, 2000)) + storage := int64(tutil.RandRangeInt(2000, 3000)) + memory := int64(tutil.RandRangeInt(3001, 4000)) + + m := make(Manifest, 3) + m[0] = simpleManifest(1)[0] + m[0].Services[0].Resources.CPU.Units.Val = sdk.NewInt(cpu) + m[0].Name = "testgroup-2" + + m[1] = simpleManifest(1)[0] + m[1].Services[0].Resources.Storage[0].Quantity.Val = sdk.NewInt(storage) + m[1].Name = "testgroup-1" + m[1].Services[0].Expose[0].Hosts = []string{"host1.test"} 
+ + m[2] = simpleManifest(1)[0] + m[2].Services[0].Resources.Memory.Quantity.Val = sdk.NewInt(memory) + m[2].Name = "testgroup-0" + m[2].Services[0].Expose[0].Hosts = []string{"host2.test"} + + deployment := make([]dtypes.Group, 3) + deployment[0] = simpleDeployment(t, m[0].Services[0].Expose, 1)[0] + deployment[0].GroupSpec.Resources[0].Memory.Quantity.Val = sdk.NewInt(memory) + deployment[0].GroupSpec.Name = "testgroup-0" + + deployment[1] = simpleDeployment(t, m[1].Services[0].Expose, 1)[0] + deployment[1].GroupSpec.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(storage) + deployment[1].GroupSpec.Name = "testgroup-1" + + deployment[2] = simpleDeployment(t, m[2].Services[0].Expose, 1)[0] + deployment[2].GroupSpec.Resources[0].CPU.Units.Val = sdk.NewInt(cpu) + deployment[2].GroupSpec.Name = "testgroup-2" + + err := m.CheckAgainstDeployment(deployment) + + require.NoError(t, err) +} + +func TestManifestWithDeploymentCPUMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].CPU.Units.Val = sdk.NewInt(999) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentGPUMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].GPU.Units.Val = sdk.NewInt(200) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentMemoryMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].Memory.Quantity.Val = sdk.NewInt(99999) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentStorageMismatch(t 
*testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(99999) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, resourcesMismatchRegex, err) +} + +func TestManifestWithDeploymentCountMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + deployment[0].GroupSpec.Resources[0].Count++ + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, underUtilizedGroupResources, err) +} + +func TestManifestWithManifestGroupMismatch(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + m[0].Services[0].Count++ + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, overUtilizedGroup, err) +} + +func TestManifestWithEndpointMismatchA(t *testing.T) { + m := simpleManifest(1) + + // Make this require an endpoint + m[0].Services[0].Expose[0] = ServiceExpose{ + Port: 2000, + ExternalPort: 0, + Proto: TCP, + Service: "", + Global: true, + Hosts: nil, + } + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + + // Remove an endpoint where the manifest calls for it + deployment[0].GroupSpec.Resources[0].Endpoints = akashtypes.Endpoints{} + + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, overUtilizedEndpoints, err) +} + +func TestManifestWithEndpointMismatchB(t *testing.T) { + m := simpleManifest(1) + deployment := simpleDeployment(t, m[0].Services[0].Expose, 1) + // Add an endpoint where the manifest doesn't call for it + deployment[0].GroupSpec.Resources[0].Endpoints = append(deployment[0].GroupSpec.Resources[0].Endpoints, akashtypes.Endpoint{}) + err := m.CheckAgainstDeployment(deployment) + require.Error(t, err) + require.Regexp(t, underUtilizedGroupEndpoints, err) +} diff --git 
a/go/manifest/v2beta3/manifest_test.go b/go/manifest/v2beta3/manifest_test.go new file mode 100644 index 00000000..12eaf81f --- /dev/null +++ b/go/manifest/v2beta3/manifest_test.go @@ -0,0 +1,469 @@ +package v2beta3 + +import ( + "bytes" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/akash-network/akash-api/go/node/client/testutil/v1beta3" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + resources "github.com/akash-network/akash-api/go/node/types/resources/v1" +) + +const ( + nameOfTestService = "test-service" + nameOfTestGroup = "testGroup" +) + +var ( + randCPU1 = uint64(testutil.RandCPUUnits()) + randCPU2 = randCPU1 + 1 + randGPU1 = uint64(testutil.RandGPUUnits()) + randMemory = testutil.RandMemoryQuantity() + randStorage = testutil.RandStorageQuantity() +) + +var randUnits1 = resources.Resources{ + ID: 1, + CPU: &resources.CPU{ + Units: resources.NewResourceValue(randCPU1), + }, + GPU: &resources.GPU{ + Units: resources.NewResourceValue(randGPU1), + }, + Memory: &resources.Memory{ + Quantity: resources.NewResourceValue(randMemory), + }, + Storage: resources.Volumes{ + resources.Storage{ + Quantity: resources.NewResourceValue(randStorage), + }, + }, + Endpoints: resources.Endpoints{}, +} + +var randUnits3 = resources.Resources{ + ID: 1, + CPU: &resources.CPU{ + Units: resources.NewResourceValue(randCPU2), + }, + Memory: &resources.Memory{ + Quantity: resources.NewResourceValue(randMemory), + }, + Storage: resources.Volumes{ + resources.Storage{ + Quantity: resources.NewResourceValue(randStorage), + }, + }, + Endpoints: resources.Endpoints{}, +} + +func simpleResources(exposes ServiceExposes) resources.Resources { + return resources.Resources{ + ID: 1, + CPU: &resources.CPU{ + Units: resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randCPU1), + }, + Attributes: nil, + }, + Memory: &resources.Memory{ + Quantity: 
resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randMemory), + }, + Attributes: nil, + }, + GPU: &resources.GPU{ + Units: resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randGPU1), + }, + Attributes: nil, + }, + Storage: resources.Volumes{ + resources.Storage{ + Name: "default", + Quantity: resources.ResourceValue{ + Val: sdk.NewIntFromUint64(randStorage), + }, + }, + }, + Endpoints: exposes.GetEndpoints(), + } +} + +func TestNilManifestIsInvalid(t *testing.T) { + m := Manifest{} + err := m.Validate() + + require.Error(t, err) + require.Regexp(t, "^.*manifest is empty.*$", err) +} + +func simpleManifest(svcCount uint32) Manifest { + expose := make([]ServiceExpose, 1) + expose[0].Global = true + expose[0].Port = 80 + expose[0].Proto = TCP + expose[0].Hosts = make([]string, 1) + expose[0].Hosts[0] = "host.test" + + services := make([]Service, 1) + services[0] = Service{ + Name: nameOfTestService, + Image: "test/image:1.0", + Command: nil, + Args: nil, + Env: nil, + Resources: simpleResources(expose), + Count: svcCount, + Expose: expose, + } + + m := make(Manifest, 1) + m[0] = Group{ + Name: nameOfTestGroup, + Services: services, + } + + return m +} + +func TestSimpleManifestIsValid(t *testing.T) { + m := simpleManifest(1) + err := m.Validate() + require.NoError(t, err) +} + +func TestSimpleManifestInvalidResourcesID(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Resources.ID = 0 + err := m.Validate() + require.Error(t, err) +} + +func TestManifestWithNoGlobalServicesIsInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Expose[0].Global = false + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*zero global services.*$", err) +} + +func TestManifestWithBadServiceNameIsInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Name = "a_bad_service_name" // should not contain underscores + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*name is invalid.*$", err) + + 
m[0].Services[0].Name = "a-name-" // should not end with dash + err = m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*name is invalid.*$", err) +} + +func TestManifestWithServiceNameIsValid(t *testing.T) { + m := simpleManifest(1) + + m[0].Services[0].Name = "9aaa-bar" // does not allow starting with a number + err := m.Validate() + require.ErrorIs(t, err, ErrInvalidManifest) + require.Regexp(t, "^.*name is invalid.*$", err) +} + +func TestManifestWithDuplicateHostIsInvalid(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 2) + const hostname = "a.test" + hosts[0] = hostname + hosts[1] = hostname + m[0].Services[0].Expose[0].Hosts = hosts + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*hostname.+is duplicated.*$", err) +} + +func TestManifestWithDashInHostname(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 1) + hosts[0] = "a-test.com" + m[0].Services[0].Expose[0].Hosts = hosts + err := m.Validate() + require.NoError(t, err) +} + +func TestManifestWithBadHostIsInvalid(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 2) + hosts[0] = "bob.test" // valid + hosts[1] = "-bob" // invalid + m[0].Services[0].Expose[0].Hosts = hosts + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*invalid hostname.*$", err) +} + +func TestManifestWithLongHostIsInvalid(t *testing.T) { + m := simpleManifest(1) + hosts := make([]string, 1) + buf := &bytes.Buffer{} + for i := 0; i != 255; i++ { + _, err := buf.WriteRune('a') + require.NoError(t, err) + } + _, err := buf.WriteString(".com") + require.NoError(t, err) + + hosts[0] = buf.String() + m[0].Services[0].Expose[0].Hosts = hosts + err = m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*invalid hostname.*$", err) +} + +func TestManifestWithDuplicateGroupIsInvalid(t *testing.T) { + mDuplicate := make(Manifest, 2) + mDuplicate[0] = simpleManifest(1)[0] + mDuplicate[1] = simpleManifest(1)[0] + 
mDuplicate[1].Services[0].Expose[0].Hosts[0] = "anotherhost.test" + err := mDuplicate.Validate() + require.Error(t, err) + require.Regexp(t, "^.*duplicate group.*$", err) +} + +func TestManifestWithNoServicesInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services = nil + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*contains no services.*$", err) +} + +func TestManifestWithEmptyServiceNameInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Name = "" + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*service name is empty.*$", err) +} + +func TestManifestWithEmptyImageNameInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Image = "" + err := m.Validate() + require.Error(t, err) + require.Regexp(t, "^.*service.+has empty image name.*$", err) +} + +func TestManifestWithEmptyEnvValueIsValid(t *testing.T) { + m := simpleManifest(1) + envVars := make([]string, 1) + envVars[0] = "FOO=" // sets FOO to empty string + m[0].Services[0].Env = envVars + err := m.Validate() + require.NoError(t, err) +} + +func TestManifestWithEmptyEnvNameIsInvalid(t *testing.T) { + m := simpleManifest(1) + envVars := make([]string, 1) + envVars[0] = "=FOO" // invalid + m[0].Services[0].Env = envVars + err := m.Validate() + require.Error(t, err) + require.Regexp(t, `^.*var\. with an empty name.*$`, err) +} + +func TestManifestWithBadEnvNameIsInvalid(t *testing.T) { + m := simpleManifest(1) + envVars := make([]string, 1) + envVars[0] = "9VAR=FOO" // invalid because it starts with a digit + m[0].Services[0].Env = envVars + err := m.Validate() + require.Error(t, err) + require.Regexp(t, `^.*var\. 
with an invalid name.*$`, err) +} + +func TestManifestServiceUnknownProtocolIsInvalid(t *testing.T) { + m := simpleManifest(1) + m[0].Services[0].Expose[0].Proto = "ICMP" + err := m.Validate() + require.Error(t, err) + require.Regexp(t, `^.*protocol .+ unknown.*$`, err) +} + +func Test_ValidateManifest(t *testing.T) { + expose := make([]ServiceExpose, 1) + expose[0].Global = true + expose[0].Port = 80 + expose[0].Proto = TCP + expose[0].Hosts = make([]string, 1) + expose[0].Hosts[0] = "host.test" + + tests := []struct { + name string + ok bool + mani Manifest + dgroups []*dtypes.GroupSpec + }{ + { + name: "empty", + ok: false, + }, + + { + name: "single", + ok: true, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: simpleResources(expose), + Count: 3, + Expose: expose, + }, + }, + }, + }, + dgroups: []*dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: simpleResources(expose), + Count: 3, + }, + }, + }, + }, + }, + + { + name: "multi-mgroup", + ok: true, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: simpleResources(expose), + Count: 1, + Expose: expose, + }, + { + Name: "svc1", + Image: "test", + Resources: simpleResources(expose), + Count: 2, + }, + }, + }, + }, + dgroups: []*dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: simpleResources(expose), + Count: 3, + }, + }, + }, + }, + }, + + { + name: "mismatch-name", + ok: false, + mani: []Group{ + { + Name: "foo-bad", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: randUnits1, + Count: 3, + }, + }, + }, + }, + dgroups: []*dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: randUnits1, + Count: 3, + }, + }, + }, + }, + }, + + { + name: "mismatch-cpu", + ok: false, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", 
+ Resources: randUnits3, + Count: 3, + }, + }, + }, + }, + dgroups: []*dtypes.GroupSpec{ + { + Name: "foo", + Resources: dtypes.ResourceUnits{ + { + Resources: randUnits1, + Count: 3, + }, + }, + }, + }, + }, + + { + name: "mismatch-group-count", + ok: false, + mani: []Group{ + { + Name: "foo", + Services: []Service{ + { + Name: "svc1", + Image: "test", + Resources: randUnits3, + Count: 3, + }, + }, + }, + }, + dgroups: []*dtypes.GroupSpec{}, + }, + } + + for _, test := range tests { + err := test.mani.CheckAgainstGSpecs(test.dgroups) + if test.ok { + assert.NoError(t, err, test.name) + } else { + assert.Error(t, err, test.name) + } + } +} diff --git a/go/manifest/v2beta3/parse.go b/go/manifest/v2beta3/parse.go new file mode 100644 index 00000000..6647e04c --- /dev/null +++ b/go/manifest/v2beta3/parse.go @@ -0,0 +1,65 @@ +package v2beta3 + +import ( + "errors" + "fmt" + "strings" + + corev1 "k8s.io/api/core/v1" +) + +var ( + errUnknownServiceProtocol = errors.New("unknown service protocol") + ErrUnsupportedServiceProtocol = errors.New("unsupported service protocol") +) + +type ServiceProtocol string + +const ( + TCP = ServiceProtocol("TCP") + UDP = ServiceProtocol("UDP") +) + +func (sp ServiceProtocol) ToString() string { + return string(sp) +} + +func (sp ServiceProtocol) ToKube() (corev1.Protocol, error) { + switch sp { + case TCP: + return corev1.ProtocolTCP, nil + case UDP: + return corev1.ProtocolUDP, nil + } + + return corev1.Protocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, sp) +} + +func ServiceProtocolFromKube(proto corev1.Protocol) (ServiceProtocol, error) { + switch proto { + case corev1.ProtocolTCP: + return TCP, nil + case corev1.ProtocolUDP: + return UDP, nil + } + + return ServiceProtocol(""), fmt.Errorf("%w: %v", errUnknownServiceProtocol, proto) +} + +func ParseServiceProtocol(input string) (ServiceProtocol, error) { + var result ServiceProtocol + + // This is not a case-sensitive parse, so make all input uppercase + input = 
strings.ToUpper(input) + + switch input { + case "TCP", "": // The empty string (no input) implies TCP + result = TCP + case "UDP": + result = UDP + default: + return result, ErrUnsupportedServiceProtocol + } + + return result, nil +} diff --git a/go/manifest/v2beta3/service.go b/go/manifest/v2beta3/service.go new file mode 100644 index 00000000..023ad3d4 --- /dev/null +++ b/go/manifest/v2beta3/service.go @@ -0,0 +1,108 @@ +package v2beta3 + +import ( + "fmt" + "sort" + "strings" + + k8svalidation "k8s.io/apimachinery/pkg/util/validation" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" +) + +func (s *Service) validate(helper *validateManifestGroupsHelper) error { + if len(s.Name) == 0 { + return fmt.Errorf("%w: service name is empty", ErrInvalidManifest) + } + + serviceNameValid := serviceNameValidationRegex.MatchString(s.Name) + if !serviceNameValid { + return fmt.Errorf("%w: service %q name is invalid", ErrInvalidManifest, s.Name) + } + + if len(s.Image) == 0 { + return fmt.Errorf("%w: service %q has empty image name", ErrInvalidManifest, s.Name) + } + + if err := s.Resources.Validate(); err != nil { + return err + } + + for _, envVar := range s.Env { + idx := strings.Index(envVar, "=") + if idx == 0 { + return fmt.Errorf("%w: service %q defines an env. var. with an empty name", ErrInvalidManifest, s.Name) + } + + var envVarName string + if idx > 0 { + envVarName = envVar[0:idx] + } else { + envVarName = envVar + } + + if 0 != len(k8svalidation.IsEnvVarName(envVarName)) { + return fmt.Errorf("%w: service %q defines an env. var. 
with an invalid name %q", ErrInvalidManifest, s.Name, envVarName) + } + } + + if !sort.IsSorted(s.Expose) { + return fmt.Errorf("%w: service %q: expose is not sorted", ErrInvalidManifest, s.Name) + } + + for _, serviceExpose := range s.Expose { + if err := serviceExpose.validate(helper); err != nil { + return fmt.Errorf("%w: service %q: %w", ErrInvalidManifest, s.Name, err) + } + } + + return nil +} + +func (s *Service) checkAgainstGSpec(gspec *groupSpec) error { + // find resource units by id + var gRes *dtypes.ResourceUnit + + for idx := range gspec.gs.Resources { + if s.Resources.ID == gspec.gs.Resources[idx].ID { + gRes = &gspec.gs.Resources[idx] + break + } + } + + if gRes == nil { + return fmt.Errorf("service %q: not found deployment group resources with ID = %d", s.Name, s.Resources.ID) + } + + if s.Count > gRes.Count { + return fmt.Errorf("service %q: over-utilized replicas (%d) > group spec resources count (%d)", + s.Name, s.Count, gRes.Count) + } + + // do not compare resources directly + if !s.Resources.CPU.Equal(gRes.CPU) { + return fmt.Errorf("service %q: CPU resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + if !s.Resources.GPU.Equal(gRes.GPU) { + return fmt.Errorf("service %q: GPU resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + if !s.Resources.Memory.Equal(gRes.Memory) { + return fmt.Errorf("service %q: Memory resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + if !s.Resources.Storage.Equal(gRes.Storage) { + return fmt.Errorf("service %q: Storage resources mismatch for ID %d", s.Name, s.Resources.ID) + } + + for _, expose := range s.Expose { + if err := expose.checkAgainstResources(gRes, gspec.endpoints); err != nil { + return fmt.Errorf("service %q: resource ID %d: %w", s.Name, gRes.ID, err) + } + } + + gRes.Count -= s.Count + + return nil +} diff --git a/go/manifest/v2beta3/service.pb.go b/go/manifest/v2beta3/service.pb.go new file mode 100644 index 00000000..4073c98d --- /dev/null +++ 
b/go/manifest/v2beta3/service.pb.go @@ -0,0 +1,1680 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/manifest/v2beta3/service.proto + +package v2beta3 + +import ( + fmt "fmt" + v1 "github.com/akash-network/akash-api/go/node/types/resources/v1" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// StorageParams +type StorageParams struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Mount string `protobuf:"bytes,2,opt,name=mount,proto3" json:"mount" yaml:"mount"` + ReadOnly bool `protobuf:"varint,3,opt,name=read_only,json=readOnly,proto3" json:"readOnly" yaml:"readOnly"` +} + +func (m *StorageParams) Reset() { *m = StorageParams{} } +func (*StorageParams) ProtoMessage() {} +func (*StorageParams) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{0} +} +func (m *StorageParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *StorageParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_StorageParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *StorageParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_StorageParams.Merge(m, src) +} +func (m *StorageParams) XXX_Size() int { + return m.Size() 
+} +func (m *StorageParams) XXX_DiscardUnknown() { + xxx_messageInfo_StorageParams.DiscardUnknown(m) +} + +var xxx_messageInfo_StorageParams proto.InternalMessageInfo + +func (m *StorageParams) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *StorageParams) GetMount() string { + if m != nil { + return m.Mount + } + return "" +} + +func (m *StorageParams) GetReadOnly() bool { + if m != nil { + return m.ReadOnly + } + return false +} + +// ServiceParams +type ServiceParams struct { + Storage []StorageParams `protobuf:"bytes,1,rep,name=storage,proto3" json:"storage" yaml:"storage"` + Credentials *ImageCredentials `protobuf:"bytes,10,opt,name=credentials,proto3" json:"credentials,omitempty" yaml:"credentials,omitempty"` +} + +func (m *ServiceParams) Reset() { *m = ServiceParams{} } +func (*ServiceParams) ProtoMessage() {} +func (*ServiceParams) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{1} +} +func (m *ServiceParams) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceParams) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceParams.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceParams) XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceParams.Merge(m, src) +} +func (m *ServiceParams) XXX_Size() int { + return m.Size() +} +func (m *ServiceParams) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceParams.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceParams proto.InternalMessageInfo + +func (m *ServiceParams) GetStorage() []StorageParams { + if m != nil { + return m.Storage + } + return nil +} + +func (m *ServiceParams) GetCredentials() *ImageCredentials { + if m != nil { + return m.Credentials + } + return nil +} + +// Credentials to fetch image from registry +type ImageCredentials 
struct { + Host string `protobuf:"bytes,1,opt,name=host,proto3" json:"host" yaml:"host"` + Email string `protobuf:"bytes,2,opt,name=email,proto3" json:"email" yaml:"email"` + Username string `protobuf:"bytes,3,opt,name=username,proto3" json:"username" yaml:"username"` + Password string `protobuf:"bytes,4,opt,name=password,proto3" json:"password" yaml:"password"` +} + +func (m *ImageCredentials) Reset() { *m = ImageCredentials{} } +func (*ImageCredentials) ProtoMessage() {} +func (*ImageCredentials) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{2} +} +func (m *ImageCredentials) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ImageCredentials) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ImageCredentials.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ImageCredentials) XXX_Merge(src proto.Message) { + xxx_messageInfo_ImageCredentials.Merge(m, src) +} +func (m *ImageCredentials) XXX_Size() int { + return m.Size() +} +func (m *ImageCredentials) XXX_DiscardUnknown() { + xxx_messageInfo_ImageCredentials.DiscardUnknown(m) +} + +var xxx_messageInfo_ImageCredentials proto.InternalMessageInfo + +func (m *ImageCredentials) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *ImageCredentials) GetEmail() string { + if m != nil { + return m.Email + } + return "" +} + +func (m *ImageCredentials) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *ImageCredentials) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +// Service stores name, image, args, env, unit, count and expose list of service +type Service struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Image string 
`protobuf:"bytes,2,opt,name=image,proto3" json:"image" yaml:"image"` + Command []string `protobuf:"bytes,3,rep,name=command,proto3" json:"command" yaml:"command"` + Args []string `protobuf:"bytes,4,rep,name=args,proto3" json:"args" yaml:"args"` + Env []string `protobuf:"bytes,5,rep,name=env,proto3" json:"env" yaml:"env"` + Resources v1.Resources `protobuf:"bytes,6,opt,name=resources,proto3" json:"resources" yaml:"resources"` + Count uint32 `protobuf:"varint,7,opt,name=count,proto3" json:"count" yaml:"count"` + Expose ServiceExposes `protobuf:"bytes,8,rep,name=expose,proto3,castrepeated=ServiceExposes" json:"expose" yaml:"expose"` + Params *ServiceParams `protobuf:"bytes,9,opt,name=params,proto3" json:"params,omitempty" yaml:"params,omitempty"` +} + +func (m *Service) Reset() { *m = Service{} } +func (*Service) ProtoMessage() {} +func (*Service) Descriptor() ([]byte, []int) { + return fileDescriptor_6d5964c4976d68e5, []int{3} +} +func (m *Service) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Service) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Service.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Service) XXX_Merge(src proto.Message) { + xxx_messageInfo_Service.Merge(m, src) +} +func (m *Service) XXX_Size() int { + return m.Size() +} +func (m *Service) XXX_DiscardUnknown() { + xxx_messageInfo_Service.DiscardUnknown(m) +} + +var xxx_messageInfo_Service proto.InternalMessageInfo + +func (m *Service) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Service) GetImage() string { + if m != nil { + return m.Image + } + return "" +} + +func (m *Service) GetCommand() []string { + if m != nil { + return m.Command + } + return nil +} + +func (m *Service) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m 
*Service) GetEnv() []string { + if m != nil { + return m.Env + } + return nil +} + +func (m *Service) GetResources() v1.Resources { + if m != nil { + return m.Resources + } + return v1.Resources{} +} + +func (m *Service) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *Service) GetExpose() ServiceExposes { + if m != nil { + return m.Expose + } + return nil +} + +func (m *Service) GetParams() *ServiceParams { + if m != nil { + return m.Params + } + return nil +} + +func init() { + proto.RegisterType((*StorageParams)(nil), "akash.manifest.v2beta3.StorageParams") + proto.RegisterType((*ServiceParams)(nil), "akash.manifest.v2beta3.ServiceParams") + proto.RegisterType((*ImageCredentials)(nil), "akash.manifest.v2beta3.ImageCredentials") + proto.RegisterType((*Service)(nil), "akash.manifest.v2beta3.Service") +} + +func init() { + proto.RegisterFile("akash/manifest/v2beta3/service.proto", fileDescriptor_6d5964c4976d68e5) +} + +var fileDescriptor_6d5964c4976d68e5 = []byte{ + // 727 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x54, 0xbd, 0x6f, 0xd3, 0x4e, + 0x18, 0x8e, 0x7f, 0x49, 0xf3, 0x71, 0xf9, 0xb5, 0x54, 0x16, 0x50, 0xb7, 0x02, 0x5f, 0x38, 0x51, + 0x11, 0x51, 0xb0, 0xd5, 0x56, 0x02, 0x89, 0x8f, 0xc5, 0x88, 0x81, 0x09, 0x74, 0x2c, 0x88, 0x05, + 0x5d, 0x9c, 0xc3, 0xb5, 0x1a, 0xfb, 0x22, 0x9f, 0x93, 0xd2, 0x8d, 0x95, 0x8d, 0xbf, 0x03, 0xfe, + 0x91, 0x8e, 0x1d, 0x3b, 0x1d, 0x90, 0x6e, 0x19, 0x33, 0xb2, 0x80, 0xee, 0xc3, 0x75, 0x02, 0x85, + 0x8a, 0x29, 0x79, 0x9e, 0xf7, 0x79, 0xfd, 0x7e, 0x1f, 0xb8, 0x49, 0xf6, 0x09, 0xdf, 0xf3, 0x13, + 0x92, 0xc6, 0x6f, 0x29, 0xcf, 0xfd, 0xf1, 0x4e, 0x8f, 0xe6, 0x64, 0xd7, 0xe7, 0x34, 0x1b, 0xc7, + 0x21, 0xf5, 0x86, 0x19, 0xcb, 0x99, 0x7d, 0x55, 0xa9, 0xbc, 0x42, 0xe5, 0x19, 0xd5, 0xc6, 0xe5, + 0x88, 0x45, 0x4c, 0x49, 0x7c, 0xf9, 0x4f, 0xab, 0x37, 0x6e, 0xff, 0xfd, 0x9b, 0xf4, 0xdd, 0x90, + 0x71, 0xf3, 0xe5, 0x8d, 0x5b, 0x5a, 0xdb, 0x23, 0x9c, 
0xfa, 0x19, 0xe5, 0x6c, 0x94, 0x85, 0x94, + 0xfb, 0xe3, 0xed, 0x12, 0x68, 0x21, 0xfa, 0x6c, 0x81, 0xe5, 0x97, 0x39, 0xcb, 0x48, 0x44, 0x5f, + 0x90, 0x8c, 0x24, 0xdc, 0xde, 0x02, 0xb5, 0x94, 0x24, 0xd4, 0xb1, 0x3a, 0x56, 0xb7, 0x15, 0xac, + 0x4d, 0x05, 0x54, 0x78, 0x26, 0x60, 0xfb, 0x90, 0x24, 0x83, 0x07, 0x48, 0x22, 0x84, 0x15, 0x69, + 0xfb, 0x60, 0x29, 0x61, 0xa3, 0x34, 0x77, 0xfe, 0x53, 0xea, 0xf5, 0xa9, 0x80, 0x9a, 0x98, 0x09, + 0xf8, 0xbf, 0x96, 0x2b, 0x88, 0xb0, 0xa6, 0xed, 0x47, 0xa0, 0x95, 0x51, 0xd2, 0x7f, 0xc3, 0xd2, + 0xc1, 0xa1, 0x53, 0xed, 0x58, 0xdd, 0x66, 0x00, 0xa7, 0x02, 0x36, 0x25, 0xf9, 0x3c, 0x1d, 0x1c, + 0xce, 0x04, 0xbc, 0xa4, 0xfd, 0x0a, 0x06, 0xe1, 0x33, 0x23, 0xfa, 0x21, 0xb3, 0xd5, 0xe5, 0x9a, + 0x6c, 0x7b, 0xa0, 0xc1, 0x75, 0xfa, 0x8e, 0xd5, 0xa9, 0x76, 0xdb, 0x3b, 0x9b, 0xde, 0xf9, 0x4d, + 0xf5, 0x16, 0xaa, 0x0c, 0x6e, 0x1c, 0x09, 0x58, 0x99, 0x0a, 0x58, 0x78, 0xcf, 0x04, 0x5c, 0xd1, + 0x71, 0x0d, 0x81, 0x70, 0x61, 0xb2, 0x3f, 0x58, 0xa0, 0x1d, 0x66, 0xb4, 0x4f, 0xd3, 0x3c, 0x26, + 0x03, 0xee, 0x80, 0x8e, 0xd5, 0x6d, 0xef, 0x74, 0xff, 0x14, 0xe8, 0x59, 0x42, 0x22, 0xfa, 0xa4, + 0xd4, 0x07, 0x8f, 0x8f, 0x04, 0xb4, 0xa6, 0x02, 0x5e, 0x99, 0xfb, 0xc8, 0x1d, 0x96, 0xc4, 0x39, + 0x4d, 0x86, 0xb9, 0xac, 0xf8, 0x9a, 0x8e, 0x7c, 0xae, 0x19, 0xe1, 0xf9, 0xd8, 0x68, 0x6a, 0x81, + 0xd5, 0x5f, 0x03, 0xc8, 0x91, 0xed, 0x31, 0x9e, 0xcf, 0x8f, 0x4c, 0xe2, 0x72, 0x64, 0x12, 0x21, + 0xac, 0x48, 0x39, 0x32, 0x9a, 0x90, 0x78, 0x30, 0x3f, 0x32, 0x45, 0x94, 0x23, 0x53, 0x10, 0x61, + 0x4d, 0xdb, 0x0f, 0x41, 0x73, 0xc4, 0x69, 0xa6, 0x96, 0xa2, 0xaa, 0x7c, 0xd4, 0xc4, 0x0a, 0xae, + 0x9c, 0x58, 0xc1, 0x20, 0x7c, 0x66, 0x94, 0xce, 0x43, 0xc2, 0xf9, 0x01, 0xcb, 0xfa, 0x4e, 0xad, + 0x74, 0x2e, 0xb8, 0xd2, 0xb9, 0x60, 0x10, 0x3e, 0x33, 0xa2, 0xef, 0x35, 0xd0, 0x30, 0xe3, 0xfe, + 0xe7, 0xb5, 0x8c, 0x65, 0x93, 0xe6, 0x6b, 0x54, 0x44, 0x59, 0xa3, 0x82, 0x08, 0x6b, 0xda, 0xbe, + 0x0f, 0x1a, 0x21, 0x4b, 0x12, 0x92, 0xf6, 0x9d, 0x6a, 0xa7, 0xda, 0x6d, 0x05, 0xd7, 0xe5, 
0x6e, + 0x18, 0xaa, 0xdc, 0x0d, 0x43, 0x20, 0x5c, 0x98, 0x64, 0x5a, 0x24, 0x8b, 0xb8, 0x53, 0x53, 0x5e, + 0x2a, 0x2d, 0x89, 0xcb, 0xb4, 0x24, 0x42, 0x58, 0x91, 0xf6, 0x16, 0xa8, 0xd2, 0x74, 0xec, 0x2c, + 0x29, 0xed, 0xba, 0xd9, 0x0a, 0x49, 0xcd, 0x04, 0x04, 0xa6, 0xf5, 0xe9, 0x18, 0x61, 0x49, 0xd9, + 0x91, 0xbc, 0x14, 0x73, 0xac, 0x4e, 0x5d, 0xad, 0x1c, 0x32, 0x2b, 0x27, 0xcf, 0xda, 0x2b, 0x2f, + 0x79, 0xbc, 0xed, 0xe1, 0x02, 0x04, 0x9b, 0x66, 0xb1, 0x4b, 0xe7, 0x99, 0x80, 0xab, 0xc5, 0x49, + 0x19, 0x0a, 0xe1, 0xd2, 0x2c, 0x9b, 0x15, 0xaa, 0x1b, 0x6e, 0x74, 0xac, 0xee, 0xb2, 0x6e, 0x56, + 0xb8, 0x78, 0xc3, 0xa1, 0xb9, 0x61, 0xf5, 0x6b, 0x0f, 0x41, 0x5d, 0x3f, 0x36, 0x4e, 0xf3, 0x82, + 0x93, 0xd3, 0xb3, 0x7b, 0xaa, 0xc4, 0xc1, 0xb6, 0xc9, 0xcc, 0x38, 0xcf, 0x04, 0x5c, 0x36, 0x35, + 0x2b, 0x8c, 0x3e, 0x7d, 0x81, 0x2b, 0x0b, 0x1e, 0x1c, 0x1b, 0xa9, 0x9d, 0x81, 0xfa, 0x50, 0xdd, + 0xad, 0xd3, 0x52, 0x8d, 0xb8, 0x28, 0xa2, 0x39, 0xf2, 0x5d, 0xd3, 0xe2, 0x55, 0xed, 0xbc, 0x70, + 0x73, 0x6b, 0xc5, 0xda, 0x2d, 0x5a, 0x10, 0x36, 0x91, 0x82, 0x57, 0x27, 0xdf, 0xdc, 0xca, 0xfb, + 0x89, 0x6b, 0x1d, 0x4d, 0x5c, 0xeb, 0x78, 0xe2, 0x5a, 0x5f, 0x27, 0xae, 0xf5, 0xf1, 0xd4, 0xad, + 0x1c, 0x9f, 0xba, 0x95, 0x93, 0x53, 0xb7, 0xf2, 0xfa, 0x5e, 0x14, 0xe7, 0x7b, 0xa3, 0x9e, 0x17, + 0xb2, 0xc4, 0x57, 0xf9, 0xdc, 0x4d, 0x69, 0x7e, 0xc0, 0xb2, 0x7d, 0x83, 0xc8, 0x30, 0xf6, 0x23, + 0xf6, 0xdb, 0x83, 0xdd, 0xab, 0xab, 0xa7, 0x77, 0xf7, 0x67, 0x00, 0x00, 0x00, 0xff, 0xff, 0x55, + 0x13, 0xfe, 0x07, 0x25, 0x06, 0x00, 0x00, +} + +func (m *StorageParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StorageParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *StorageParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = 
l + if m.ReadOnly { + i-- + if m.ReadOnly { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Mount) > 0 { + i -= len(m.Mount) + copy(dAtA[i:], m.Mount) + i = encodeVarintService(dAtA, i, uint64(len(m.Mount))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *ServiceParams) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceParams) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceParams) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Credentials != nil { + { + size, err := m.Credentials.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x52 + } + if len(m.Storage) > 0 { + for iNdEx := len(m.Storage) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Storage[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *ImageCredentials) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ImageCredentials) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ImageCredentials) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Password) > 0 { + i -= len(m.Password) 
+ copy(dAtA[i:], m.Password) + i = encodeVarintService(dAtA, i, uint64(len(m.Password))) + i-- + dAtA[i] = 0x22 + } + if len(m.Username) > 0 { + i -= len(m.Username) + copy(dAtA[i:], m.Username) + i = encodeVarintService(dAtA, i, uint64(len(m.Username))) + i-- + dAtA[i] = 0x1a + } + if len(m.Email) > 0 { + i -= len(m.Email) + copy(dAtA[i:], m.Email) + i = encodeVarintService(dAtA, i, uint64(len(m.Email))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintService(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Service) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Service) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Service) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Params != nil { + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + if len(m.Expose) > 0 { + for iNdEx := len(m.Expose) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Expose[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + } + if m.Count != 0 { + i = encodeVarintService(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x38 + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintService(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + if len(m.Env) > 0 { + for iNdEx := len(m.Env) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Env[iNdEx]) + copy(dAtA[i:], m.Env[iNdEx]) + i = 
encodeVarintService(dAtA, i, uint64(len(m.Env[iNdEx]))) + i-- + dAtA[i] = 0x2a + } + } + if len(m.Args) > 0 { + for iNdEx := len(m.Args) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Args[iNdEx]) + copy(dAtA[i:], m.Args[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.Args[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Command) > 0 { + for iNdEx := len(m.Command) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Command[iNdEx]) + copy(dAtA[i:], m.Command[iNdEx]) + i = encodeVarintService(dAtA, i, uint64(len(m.Command[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Image) > 0 { + i -= len(m.Image) + copy(dAtA[i:], m.Image) + i = encodeVarintService(dAtA, i, uint64(len(m.Image))) + i-- + dAtA[i] = 0x12 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintService(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintService(dAtA []byte, offset int, v uint64) int { + offset -= sovService(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *StorageParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Mount) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if m.ReadOnly { + n += 2 + } + return n +} + +func (m *ServiceParams) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Storage) > 0 { + for _, e := range m.Storage { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + if m.Credentials != nil { + l = m.Credentials.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *ImageCredentials) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Host) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Email) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = 
len(m.Username) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Password) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func (m *Service) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + l = len(m.Image) + if l > 0 { + n += 1 + l + sovService(uint64(l)) + } + if len(m.Command) > 0 { + for _, s := range m.Command { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + if len(m.Args) > 0 { + for _, s := range m.Args { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + if len(m.Env) > 0 { + for _, s := range m.Env { + l = len(s) + n += 1 + l + sovService(uint64(l)) + } + } + l = m.Resources.Size() + n += 1 + l + sovService(uint64(l)) + if m.Count != 0 { + n += 1 + sovService(uint64(m.Count)) + } + if len(m.Expose) > 0 { + for _, e := range m.Expose { + l = e.Size() + n += 1 + l + sovService(uint64(l)) + } + } + if m.Params != nil { + l = m.Params.Size() + n += 1 + l + sovService(uint64(l)) + } + return n +} + +func sovService(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozService(x uint64) (n int) { + return sovService(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *StorageParams) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StorageParams{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Mount:` + fmt.Sprintf("%v", this.Mount) + `,`, + `ReadOnly:` + fmt.Sprintf("%v", this.ReadOnly) + `,`, + `}`, + }, "") + return s +} +func (this *ServiceParams) String() string { + if this == nil { + return "nil" + } + repeatedStringForStorage := "[]StorageParams{" + for _, f := range this.Storage { + repeatedStringForStorage += strings.Replace(strings.Replace(f.String(), "StorageParams", "StorageParams", 1), `&`, ``, 1) + "," + } + repeatedStringForStorage += "}" + s := strings.Join([]string{`&ServiceParams{`, + `Storage:` + repeatedStringForStorage + 
`,`, + `Credentials:` + strings.Replace(this.Credentials.String(), "ImageCredentials", "ImageCredentials", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ImageCredentials) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ImageCredentials{`, + `Host:` + fmt.Sprintf("%v", this.Host) + `,`, + `Email:` + fmt.Sprintf("%v", this.Email) + `,`, + `Username:` + fmt.Sprintf("%v", this.Username) + `,`, + `Password:` + fmt.Sprintf("%v", this.Password) + `,`, + `}`, + }, "") + return s +} +func (this *Service) String() string { + if this == nil { + return "nil" + } + repeatedStringForExpose := "[]ServiceExpose{" + for _, f := range this.Expose { + repeatedStringForExpose += fmt.Sprintf("%v", f) + "," + } + repeatedStringForExpose += "}" + s := strings.Join([]string{`&Service{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Image:` + fmt.Sprintf("%v", this.Image) + `,`, + `Command:` + fmt.Sprintf("%v", this.Command) + `,`, + `Args:` + fmt.Sprintf("%v", this.Args) + `,`, + `Env:` + fmt.Sprintf("%v", this.Env) + `,`, + `Resources:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Resources), "Resources", "v1.Resources", 1), `&`, ``, 1) + `,`, + `Count:` + fmt.Sprintf("%v", this.Count) + `,`, + `Expose:` + repeatedStringForExpose + `,`, + `Params:` + strings.Replace(this.Params.String(), "ServiceParams", "ServiceParams", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringService(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *StorageParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StorageParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StorageParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mount", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mount = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ReadOnly", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + m.ReadOnly = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ServiceParams) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceParams: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceParams: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Storage = append(m.Storage, StorageParams{}) + if err := m.Storage[len(m.Storage)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Credentials", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Credentials == nil { + m.Credentials = &ImageCredentials{} + } + if err := m.Credentials.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ImageCredentials) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ImageCredentials: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ImageCredentials: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := 
int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Email", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Email = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Username", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Username = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Password", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Password = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Service) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Service: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Service: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Image", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Image = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Command", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Command = append(m.Command, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Args", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + 
} + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Args = append(m.Args, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Env", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Env = append(m.Env, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Expose", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Expose = append(m.Expose, ServiceExpose{}) + if err := m.Expose[len(m.Expose)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowService + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthService + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthService + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Params == nil { + m.Params = &ServiceParams{} + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipService(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthService + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipService(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
(uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowService + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthService + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupService + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthService + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthService = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowService = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupService = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/manifest/v2beta3/service_expose_test.go b/go/manifest/v2beta3/service_expose_test.go new file mode 100644 index 00000000..07af159e --- /dev/null +++ b/go/manifest/v2beta3/service_expose_test.go @@ -0,0 +1,45 @@ +package v2beta3 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestShouldBeIngress(t *testing.T) { + // Should not create ingress for something on port 81 + exp := ServiceExpose{ + Global: true, + Proto: TCP, + Port: 81, + } + + require.False(t, exp.IsIngress()) + + exp = ServiceExpose{ + Global: true, + Proto: TCP, + Port: 80, + } + + // Should create ingress for something on port 80 + require.True(t, exp.IsIngress()) + + exp = ServiceExpose{ + 
Global: false, + Proto: TCP, + Port: 80, + } + + // Should not create ingress for something on port 80 that is not Global + require.False(t, exp.IsIngress()) + + exp = ServiceExpose{ + Global: true, + Proto: UDP, + Port: 80, + } + + // Should not create ingress for something on port 80 that is UDP + require.False(t, exp.IsIngress()) +} diff --git a/go/manifest/v2beta3/serviceexpose.go b/go/manifest/v2beta3/serviceexpose.go new file mode 100644 index 00000000..63c04bbd --- /dev/null +++ b/go/manifest/v2beta3/serviceexpose.go @@ -0,0 +1,108 @@ +package v2beta3 + +import ( + "fmt" + "math" + "sort" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + resources "github.com/akash-network/akash-api/go/node/types/resources/v1" +) + +func (s *ServiceExpose) GetEndpoints() resources.Endpoints { + if !s.Global { + return resources.Endpoints{} + } + + endpoints := make(resources.Endpoints, 0, 1) + + if len(s.IP) != 0 { + endpoints = make(resources.Endpoints, 0, 2) + + endpoints = append( + endpoints, + resources.Endpoint{ + Kind: resources.Endpoint_LEASED_IP, + SequenceNumber: s.EndpointSequenceNumber, + }, + ) + } + + kind := resources.Endpoint_RANDOM_PORT + if s.IsIngress() { + kind = resources.Endpoint_SHARED_HTTP + } + + endpoints = append(endpoints, resources.Endpoint{Kind: kind}) + + sort.Sort(endpoints) + + return endpoints +} + +func (s *ServiceExpose) validate(helper *validateManifestGroupsHelper) error { + if s.Port == 0 || s.Port > math.MaxUint16 { + return fmt.Errorf("port value must be 0 < value <= 65535 ") + } + + switch s.Proto { + case TCP, UDP: + break + default: + return fmt.Errorf("protocol %q unknown", s.Proto) + } + + if s.Global { + helper.globalServiceCount++ + } + + for _, host := range s.Hosts { + if !isValidHostname(host) { + return fmt.Errorf("has invalid hostname %q", host) + } + + _, exists := helper.hostnames[host] + if exists { + return fmt.Errorf("hostname %q is duplicated, this is not allowed", host) + } + 
helper.hostnames[host] = 0 // Value stored does not matter + } + + return nil +} + +func (s *ServiceExpose) checkAgainstResources(res *dtypes.ResourceUnit, eps validateEndpointsHelper) error { + if s.Global { + eph := eps[res.ID] + + if s.IsIngress() { + if !eph.tryDecHTTP() { + return fmt.Errorf("over-utilized HTTP endpoints") + } + } else { + if !eph.tryDecPort() { + return fmt.Errorf("over-utilized PORT endpoints") + } + } + + if len(s.IP) > 0 { + if !eph.tryDecIP() { + return fmt.Errorf("over-utilized IP endpoints") + } + } + } + + return nil +} + +func (s *ServiceExpose) IsIngress() bool { + return s.Proto == TCP && s.Global && 80 == s.GetExternalPort() +} + +func (s *ServiceExpose) GetExternalPort() int32 { + if s.ExternalPort == 0 { + return int32(s.Port) + } + + return int32(s.ExternalPort) +} diff --git a/go/manifest/v2beta3/serviceexpose.pb.go b/go/manifest/v2beta3/serviceexpose.pb.go new file mode 100644 index 00000000..88a5540b --- /dev/null +++ b/go/manifest/v2beta3/serviceexpose.pb.go @@ -0,0 +1,666 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/manifest/v2beta3/serviceexpose.proto + +package v2beta3 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ServiceExpose stores exposed ports and hosts details +type ServiceExpose struct { + // port on the container + Port uint32 `protobuf:"varint,1,opt,name=port,proto3" json:"port" yaml:"port"` + // port on the service definition + ExternalPort uint32 `protobuf:"varint,2,opt,name=external_port,json=externalPort,proto3" json:"externalPort" yaml:"externalPort"` + Proto ServiceProtocol `protobuf:"bytes,3,opt,name=proto,proto3,casttype=ServiceProtocol" json:"proto" yaml:"proto"` + Service string `protobuf:"bytes,4,opt,name=service,proto3" json:"service" yaml:"service"` + Global bool `protobuf:"varint,5,opt,name=global,proto3" json:"global" yaml:"global"` + Hosts []string `protobuf:"bytes,6,rep,name=hosts,proto3" json:"hosts" yaml:"hosts"` + HTTPOptions ServiceExposeHTTPOptions `protobuf:"bytes,7,opt,name=http_options,json=httpOptions,proto3" json:"httpOptions" yaml:"httpOptions"` + // The name of the IP address associated with this, if any + IP string `protobuf:"bytes,8,opt,name=ip,proto3" json:"ip" yaml:"ip"` + // The sequence number of the associated endpoint in the on-chain data + EndpointSequenceNumber uint32 `protobuf:"varint,9,opt,name=endpoint_sequence_number,json=endpointSequenceNumber,proto3" json:"endpointSequenceNumber" yaml:"endpointSequenceNumber"` +} + +func (m *ServiceExpose) Reset() { *m = ServiceExpose{} } +func (*ServiceExpose) ProtoMessage() {} +func (*ServiceExpose) Descriptor() ([]byte, []int) { + return fileDescriptor_0cbeaeb8a333db8d, []int{0} +} +func (m *ServiceExpose) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ServiceExpose) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ServiceExpose.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ServiceExpose) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_ServiceExpose.Merge(m, src) +} +func (m *ServiceExpose) XXX_Size() int { + return m.Size() +} +func (m *ServiceExpose) XXX_DiscardUnknown() { + xxx_messageInfo_ServiceExpose.DiscardUnknown(m) +} + +var xxx_messageInfo_ServiceExpose proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ServiceExpose)(nil), "akash.manifest.v2beta3.ServiceExpose") +} + +func init() { + proto.RegisterFile("akash/manifest/v2beta3/serviceexpose.proto", fileDescriptor_0cbeaeb8a333db8d) +} + +var fileDescriptor_0cbeaeb8a333db8d = []byte{ + // 540 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x93, 0x3f, 0x6f, 0xd3, 0x40, + 0x18, 0xc6, 0xed, 0x34, 0x49, 0x9b, 0x4b, 0x02, 0xd2, 0x81, 0x8a, 0x5b, 0x54, 0x5f, 0xe4, 0x05, + 0xf3, 0xcf, 0x46, 0x8d, 0x04, 0xa8, 0x6c, 0x96, 0x90, 0x40, 0x42, 0x10, 0xb9, 0x1d, 0x10, 0x4b, + 0xe4, 0x84, 0x23, 0x39, 0x35, 0xf1, 0x1d, 0xf6, 0xa5, 0x94, 0x8d, 0x91, 0x05, 0x89, 0x8f, 0xc0, + 0xc2, 0x77, 0xe9, 0xd8, 0xb1, 0xd3, 0x09, 0x9c, 0xcd, 0xa3, 0x47, 0x26, 0xe4, 0xbb, 0xb3, 0x9a, + 0xaa, 0xed, 0xe6, 0xf7, 0x79, 0x7f, 0xcf, 0x7b, 0x8f, 0xee, 0x3d, 0x83, 0x07, 0xd1, 0x61, 0x94, + 0x4e, 0xfd, 0x79, 0x14, 0x93, 0x4f, 0x38, 0xe5, 0xfe, 0xd1, 0xee, 0x08, 0xf3, 0xa8, 0xef, 0xa7, + 0x38, 0x39, 0x22, 0x63, 0x8c, 0x8f, 0x19, 0x4d, 0xb1, 0xc7, 0x12, 0xca, 0x29, 0xdc, 0x94, 0xac, + 0x57, 0xb1, 0x9e, 0x66, 0xb7, 0x6f, 0x4f, 0xe8, 0x84, 0x4a, 0xc4, 0x2f, 0xbf, 0x14, 0xbd, 0xed, + 0x5e, 0x33, 0x79, 0xca, 0x39, 0xa3, 0x8c, 0x13, 0x1a, 0xa7, 0x8a, 0x74, 0x7e, 0x37, 0x40, 0x77, + 0x5f, 0x9d, 0xf7, 0x52, 0x9e, 0x07, 0x1f, 0x82, 0x3a, 0xa3, 0x09, 0xb7, 0xcc, 0x9e, 0xe9, 0x76, + 0x83, 0x3b, 0xb9, 0x40, 0xb2, 0x2e, 0x04, 0x6a, 0x7f, 0x8d, 0xe6, 0xb3, 0x3d, 0xa7, 0xac, 0x9c, + 0x50, 0x8a, 0xf0, 0x0d, 0xe8, 0xe2, 0x63, 0x8e, 0x93, 0x38, 0x9a, 0x0d, 0xa5, 0xab, 0x26, 0x5d, + 0xf7, 0x72, 0x81, 0x3a, 0x55, 0x63, 0xa0, 0xdc, 0xb7, 0x94, 0x7b, 0x55, 0x75, 0xc2, 0x0b, 0x10, 
+ 0x0c, 0x40, 0x43, 0xa6, 0xb2, 0xd6, 0x7a, 0xa6, 0xdb, 0x0a, 0x1e, 0xe5, 0x02, 0x29, 0xa1, 0x10, + 0xa8, 0xa3, 0x0f, 0x97, 0xa9, 0xff, 0x09, 0x74, 0x53, 0xa7, 0x1e, 0x94, 0xc2, 0x98, 0xce, 0x42, + 0x45, 0xc2, 0x67, 0x60, 0x5d, 0xdf, 0x9f, 0x55, 0x97, 0x53, 0x76, 0x72, 0x81, 0x2a, 0xa9, 0x10, + 0xe8, 0x86, 0x9a, 0xa3, 0x05, 0x27, 0xac, 0x5a, 0xb0, 0x0f, 0x9a, 0x93, 0x19, 0x1d, 0x45, 0x33, + 0xab, 0xd1, 0x33, 0xdd, 0x8d, 0xe0, 0x6e, 0x2e, 0x90, 0x56, 0x0a, 0x81, 0xba, 0xca, 0xa6, 0x6a, + 0x27, 0xd4, 0x0d, 0xe8, 0x83, 0xc6, 0x94, 0xa6, 0x3c, 0xb5, 0x9a, 0xbd, 0x35, 0xb7, 0x15, 0x6c, + 0x95, 0x89, 0xa5, 0x70, 0x9e, 0x58, 0x96, 0x4e, 0xa8, 0x64, 0xf8, 0xc3, 0x04, 0x9d, 0x72, 0x0b, + 0x43, 0xbd, 0x06, 0x6b, 0xbd, 0x67, 0xba, 0xed, 0xdd, 0x27, 0xde, 0xd5, 0xfb, 0xf5, 0x2e, 0xec, + 0xe6, 0xd5, 0xc1, 0xc1, 0xe0, 0x9d, 0xf2, 0x05, 0xcf, 0x4f, 0x04, 0x32, 0x32, 0x81, 0xda, 0x2b, + 0x62, 0x2e, 0x50, 0xbb, 0x1c, 0xae, 0xcb, 0x42, 0x20, 0xa8, 0x33, 0x9c, 0x8b, 0x4e, 0xb8, 0x8a, + 0xc0, 0xfb, 0xa0, 0x46, 0x98, 0xb5, 0x21, 0x6f, 0x6a, 0x2b, 0x13, 0xa8, 0xf6, 0x7a, 0x90, 0x0b, + 0x54, 0x23, 0xac, 0x10, 0xa8, 0xa5, 0xcc, 0x84, 0x39, 0x61, 0x8d, 0x30, 0xb8, 0x00, 0x16, 0x8e, + 0x3f, 0x32, 0x4a, 0x62, 0x3e, 0x4c, 0xf1, 0xe7, 0x05, 0x8e, 0xc7, 0x78, 0x18, 0x2f, 0xe6, 0x23, + 0x9c, 0x58, 0x2d, 0xb9, 0xf6, 0x17, 0xb9, 0x40, 0x9b, 0x15, 0xb3, 0xaf, 0x91, 0xb7, 0x92, 0x28, + 0x04, 0xda, 0xd1, 0x0f, 0xe0, 0xca, 0xbe, 0x13, 0x5e, 0x63, 0xdc, 0xab, 0x7f, 0xff, 0x85, 0x8c, + 0xe0, 0xfd, 0xd9, 0x5f, 0xdb, 0xf8, 0x96, 0xd9, 0xe6, 0x49, 0x66, 0x9b, 0xa7, 0x99, 0x6d, 0xfe, + 0xc9, 0x6c, 0xf3, 0xe7, 0xd2, 0x36, 0x4e, 0x97, 0xb6, 0x71, 0xb6, 0xb4, 0x8d, 0x0f, 0x4f, 0x27, + 0x84, 0x4f, 0x17, 0x23, 0x6f, 0x4c, 0xe7, 0xbe, 0xbc, 0xcc, 0xc7, 0x31, 0xe6, 0x5f, 0x68, 0x72, + 0xa8, 0xab, 0x88, 0x11, 0x7f, 0x42, 0x2f, 0xfd, 0x13, 0xa3, 0xa6, 0x7c, 0x37, 0xfd, 0xff, 0x01, + 0x00, 0x00, 0xff, 0xff, 0xb0, 0x45, 0x14, 0x05, 0x8e, 0x03, 0x00, 0x00, +} + +func (m *ServiceExpose) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ServiceExpose) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ServiceExpose) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.EndpointSequenceNumber != 0 { + i = encodeVarintServiceexpose(dAtA, i, uint64(m.EndpointSequenceNumber)) + i-- + dAtA[i] = 0x48 + } + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0x42 + } + { + size, err := m.HTTPOptions.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintServiceexpose(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + if len(m.Hosts) > 0 { + for iNdEx := len(m.Hosts) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Hosts[iNdEx]) + copy(dAtA[i:], m.Hosts[iNdEx]) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Hosts[iNdEx]))) + i-- + dAtA[i] = 0x32 + } + } + if m.Global { + i-- + if m.Global { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x28 + } + if len(m.Service) > 0 { + i -= len(m.Service) + copy(dAtA[i:], m.Service) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Service))) + i-- + dAtA[i] = 0x22 + } + if len(m.Proto) > 0 { + i -= len(m.Proto) + copy(dAtA[i:], m.Proto) + i = encodeVarintServiceexpose(dAtA, i, uint64(len(m.Proto))) + i-- + dAtA[i] = 0x1a + } + if m.ExternalPort != 0 { + i = encodeVarintServiceexpose(dAtA, i, uint64(m.ExternalPort)) + i-- + dAtA[i] = 0x10 + } + if m.Port != 0 { + i = encodeVarintServiceexpose(dAtA, i, uint64(m.Port)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintServiceexpose(dAtA []byte, offset int, v uint64) int { + offset -= sovServiceexpose(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = 
uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ServiceExpose) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Port != 0 { + n += 1 + sovServiceexpose(uint64(m.Port)) + } + if m.ExternalPort != 0 { + n += 1 + sovServiceexpose(uint64(m.ExternalPort)) + } + l = len(m.Proto) + if l > 0 { + n += 1 + l + sovServiceexpose(uint64(l)) + } + l = len(m.Service) + if l > 0 { + n += 1 + l + sovServiceexpose(uint64(l)) + } + if m.Global { + n += 2 + } + if len(m.Hosts) > 0 { + for _, s := range m.Hosts { + l = len(s) + n += 1 + l + sovServiceexpose(uint64(l)) + } + } + l = m.HTTPOptions.Size() + n += 1 + l + sovServiceexpose(uint64(l)) + l = len(m.IP) + if l > 0 { + n += 1 + l + sovServiceexpose(uint64(l)) + } + if m.EndpointSequenceNumber != 0 { + n += 1 + sovServiceexpose(uint64(m.EndpointSequenceNumber)) + } + return n +} + +func sovServiceexpose(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozServiceexpose(x uint64) (n int) { + return sovServiceexpose(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ServiceExpose) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ServiceExpose{`, + `Port:` + fmt.Sprintf("%v", this.Port) + `,`, + `ExternalPort:` + fmt.Sprintf("%v", this.ExternalPort) + `,`, + `Proto:` + fmt.Sprintf("%v", this.Proto) + `,`, + `Service:` + fmt.Sprintf("%v", this.Service) + `,`, + `Global:` + fmt.Sprintf("%v", this.Global) + `,`, + `Hosts:` + fmt.Sprintf("%v", this.Hosts) + `,`, + `HTTPOptions:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.HTTPOptions), "ServiceExposeHTTPOptions", "ServiceExposeHTTPOptions", 1), `&`, ``, 1) + `,`, + `IP:` + fmt.Sprintf("%v", this.IP) + `,`, + `EndpointSequenceNumber:` + fmt.Sprintf("%v", this.EndpointSequenceNumber) + `,`, + `}`, + }, "") + return s +} +func valueToStringServiceexpose(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" 
+ } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ServiceExpose) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ServiceExpose: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ServiceExpose: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Port", wireType) + } + m.Port = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Port |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ExternalPort", wireType) + } + m.ExternalPort = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ExternalPort |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Proto", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return 
ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Proto = ServiceProtocol(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Service", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Service = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Global", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Global = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Hosts", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Hosts 
= append(m.Hosts, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HTTPOptions", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.HTTPOptions.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthServiceexpose + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthServiceexpose + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 9: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field EndpointSequenceNumber", wireType) + } + m.EndpointSequenceNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.EndpointSequenceNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipServiceexpose(dAtA[iNdEx:]) + if err != nil { + return err + } + 
if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthServiceexpose + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipServiceexpose(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowServiceexpose + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthServiceexpose + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupServiceexpose + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthServiceexpose + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthServiceexpose = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowServiceexpose = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupServiceexpose = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/manifest/v2beta3/serviceexposes.go b/go/manifest/v2beta3/serviceexposes.go 
new file mode 100644 index 00000000..c639fe71 --- /dev/null +++ b/go/manifest/v2beta3/serviceexposes.go @@ -0,0 +1,51 @@ +package v2beta3 + +import ( + "sort" + + resources "github.com/akash-network/akash-api/go/node/types/resources/v1" +) + +type ServiceExposes []ServiceExpose + +var _ sort.Interface = (*ServiceExposes)(nil) + +func (s ServiceExposes) Len() int { + return len(s) +} + +func (s ServiceExposes) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ServiceExposes) Less(i, j int) bool { + a, b := s[i], s[j] + + if a.Service != b.Service { + return a.Service < b.Service + } + + if a.Port != b.Port { + return a.Port < b.Port + } + + if a.Proto != b.Proto { + return a.Proto < b.Proto + } + + if a.Global != b.Global { + return a.Global + } + + return false +} + +func (s ServiceExposes) GetEndpoints() resources.Endpoints { + endpoints := make(resources.Endpoints, 0) + + for _, expose := range s { + endpoints = append(endpoints, expose.GetEndpoints()...) + } + + return endpoints +} diff --git a/go/manifest/v2beta3/services.go b/go/manifest/v2beta3/services.go new file mode 100644 index 00000000..abf228b5 --- /dev/null +++ b/go/manifest/v2beta3/services.go @@ -0,0 +1,21 @@ +package v2beta3 + +import ( + "sort" +) + +type Services []Service + +var _ sort.Interface = (*ServiceExposes)(nil) + +func (s Services) Len() int { + return len(s) +} + +func (s Services) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s Services) Less(i, j int) bool { + return s[i].Name < s[j].Name +} diff --git a/go/node/audit/v1beta3/errors.go b/go/node/audit/v1beta3/errors.go index dd551e72..3b635413 100644 --- a/go/node/audit/v1beta3/errors.go +++ b/go/node/audit/v1beta3/errors.go @@ -1,22 +1,16 @@ package v1beta3 import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errProviderNotFound uint32 = iota + 1 - errInvalidAddress - errAttributeNotFound + "errors" ) var ( // ErrProviderNotFound provider not found - ErrProviderNotFound = 
sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") + ErrProviderNotFound = errors.New("invalid provider: address not found") // ErrInvalidAddress invalid trusted auditor address - ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") + ErrInvalidAddress = errors.New("invalid address") // ErrAttributeNotFound invalid trusted auditor address - ErrAttributeNotFound = sdkerrors.Register(ModuleName, errAttributeNotFound, "attribute not found") + ErrAttributeNotFound = errors.New("attribute not found") ) diff --git a/go/node/audit/v1beta4/audit.pb.go b/go/node/audit/v1beta4/audit.pb.go new file mode 100644 index 00000000..5d2ef354 --- /dev/null +++ b/go/node/audit/v1beta4/audit.pb.go @@ -0,0 +1,2080 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1beta4/audit.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + github_com_akash_network_akash_api_go_node_types_attributes_v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Provider stores owner auditor and attributes details +type Provider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,4,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *Provider) Reset() { *m = Provider{} } +func (m *Provider) String() string { return proto.CompactTextString(m) } +func (*Provider) ProtoMessage() {} +func (*Provider) Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{0} +} +func (m *Provider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Provider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Provider) XXX_Merge(src proto.Message) { + xxx_messageInfo_Provider.Merge(m, src) +} +func (m *Provider) XXX_Size() int { + return m.Size() +} +func (m *Provider) XXX_DiscardUnknown() { + xxx_messageInfo_Provider.DiscardUnknown(m) +} + +var xxx_messageInfo_Provider proto.InternalMessageInfo + +func (m *Provider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *Provider) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// Attributes +type AuditedAttributes struct { + Owner string 
`protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *AuditedAttributes) Reset() { *m = AuditedAttributes{} } +func (m *AuditedAttributes) String() string { return proto.CompactTextString(m) } +func (*AuditedAttributes) ProtoMessage() {} +func (*AuditedAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{1} +} +func (m *AuditedAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AuditedAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AuditedAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AuditedAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_AuditedAttributes.Merge(m, src) +} +func (m *AuditedAttributes) XXX_Size() int { + return m.Size() +} +func (m *AuditedAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_AuditedAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_AuditedAttributes proto.InternalMessageInfo + +func (m *AuditedAttributes) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *AuditedAttributes) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *AuditedAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// AttributesResponse represents details of deployment along with group details +type AttributesResponse struct { + 
Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` +} + +func (m *AttributesResponse) Reset() { *m = AttributesResponse{} } +func (m *AttributesResponse) String() string { return proto.CompactTextString(m) } +func (*AttributesResponse) ProtoMessage() {} +func (*AttributesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{2} +} +func (m *AttributesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *AttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttributesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttributesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributesResponse.Merge(m, src) +} +func (m *AttributesResponse) XXX_Size() int { + return m.Size() +} +func (m *AttributesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_AttributesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributesResponse proto.InternalMessageInfo + +func (m *AttributesResponse) GetAttributes() []AuditedAttributes { + if m != nil { + return m.Attributes + } + return nil +} + +// AttributesFilters defines filters used to filter deployments +type AttributesFilters struct { + Auditors []string `protobuf:"bytes,1,rep,name=auditors,proto3" json:"auditors" yaml:"auditors"` + Owners []string `protobuf:"bytes,2,rep,name=owners,proto3" json:"owners" yaml:"owners"` +} + +func (m *AttributesFilters) Reset() { *m = AttributesFilters{} } +func (m *AttributesFilters) String() string { return proto.CompactTextString(m) } +func (*AttributesFilters) ProtoMessage() {} +func (*AttributesFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{3} +} +func (m *AttributesFilters) XXX_Unmarshal(b []byte) error { + return 
m.Unmarshal(b) +} +func (m *AttributesFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_AttributesFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *AttributesFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_AttributesFilters.Merge(m, src) +} +func (m *AttributesFilters) XXX_Size() int { + return m.Size() +} +func (m *AttributesFilters) XXX_DiscardUnknown() { + xxx_messageInfo_AttributesFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_AttributesFilters proto.InternalMessageInfo + +func (m *AttributesFilters) GetAuditors() []string { + if m != nil { + return m.Auditors + } + return nil +} + +func (m *AttributesFilters) GetOwners() []string { + if m != nil { + return m.Owners + } + return nil +} + +// MsgSignProviderAttributes defines an SDK message for signing a provider attributes +type MsgSignProviderAttributes struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *MsgSignProviderAttributes) Reset() { *m = MsgSignProviderAttributes{} } +func (m *MsgSignProviderAttributes) String() string { return proto.CompactTextString(m) } +func (*MsgSignProviderAttributes) ProtoMessage() {} +func (*MsgSignProviderAttributes) Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{4} +} +func (m *MsgSignProviderAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSignProviderAttributes) XXX_Marshal(b []byte, deterministic bool) 
([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSignProviderAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSignProviderAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSignProviderAttributes.Merge(m, src) +} +func (m *MsgSignProviderAttributes) XXX_Size() int { + return m.Size() +} +func (m *MsgSignProviderAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSignProviderAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSignProviderAttributes proto.InternalMessageInfo + +func (m *MsgSignProviderAttributes) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgSignProviderAttributes) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *MsgSignProviderAttributes) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
+type MsgSignProviderAttributesResponse struct { +} + +func (m *MsgSignProviderAttributesResponse) Reset() { *m = MsgSignProviderAttributesResponse{} } +func (m *MsgSignProviderAttributesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgSignProviderAttributesResponse) ProtoMessage() {} +func (*MsgSignProviderAttributesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{5} +} +func (m *MsgSignProviderAttributesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgSignProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgSignProviderAttributesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgSignProviderAttributesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgSignProviderAttributesResponse.Merge(m, src) +} +func (m *MsgSignProviderAttributesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgSignProviderAttributesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgSignProviderAttributesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgSignProviderAttributesResponse proto.InternalMessageInfo + +// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes +type MsgDeleteProviderAttributes struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + Auditor string `protobuf:"bytes,2,opt,name=auditor,proto3" json:"auditor" yaml:"auditor"` + Keys []string `protobuf:"bytes,3,rep,name=keys,proto3" json:"keys" yaml:"keys"` +} + +func (m *MsgDeleteProviderAttributes) Reset() { *m = MsgDeleteProviderAttributes{} } +func (m *MsgDeleteProviderAttributes) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProviderAttributes) ProtoMessage() {} +func (*MsgDeleteProviderAttributes) 
Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{6} +} +func (m *MsgDeleteProviderAttributes) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProviderAttributes) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProviderAttributes.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProviderAttributes) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProviderAttributes.Merge(m, src) +} +func (m *MsgDeleteProviderAttributes) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProviderAttributes) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProviderAttributes.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProviderAttributes proto.InternalMessageInfo + +func (m *MsgDeleteProviderAttributes) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgDeleteProviderAttributes) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *MsgDeleteProviderAttributes) GetKeys() []string { + if m != nil { + return m.Keys + } + return nil +} + +// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
+type MsgDeleteProviderAttributesResponse struct { +} + +func (m *MsgDeleteProviderAttributesResponse) Reset() { *m = MsgDeleteProviderAttributesResponse{} } +func (m *MsgDeleteProviderAttributesResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProviderAttributesResponse) ProtoMessage() {} +func (*MsgDeleteProviderAttributesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b75b0e7d403816e8, []int{7} +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProviderAttributesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProviderAttributesResponse.Merge(m, src) +} +func (m *MsgDeleteProviderAttributesResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProviderAttributesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProviderAttributesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProviderAttributesResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Provider)(nil), "akash.audit.v1beta4.Provider") + proto.RegisterType((*AuditedAttributes)(nil), "akash.audit.v1beta4.AuditedAttributes") + proto.RegisterType((*AttributesResponse)(nil), "akash.audit.v1beta4.AttributesResponse") + proto.RegisterType((*AttributesFilters)(nil), "akash.audit.v1beta4.AttributesFilters") + proto.RegisterType((*MsgSignProviderAttributes)(nil), "akash.audit.v1beta4.MsgSignProviderAttributes") + proto.RegisterType((*MsgSignProviderAttributesResponse)(nil), "akash.audit.v1beta4.MsgSignProviderAttributesResponse") + 
proto.RegisterType((*MsgDeleteProviderAttributes)(nil), "akash.audit.v1beta4.MsgDeleteProviderAttributes") + proto.RegisterType((*MsgDeleteProviderAttributesResponse)(nil), "akash.audit.v1beta4.MsgDeleteProviderAttributesResponse") +} + +func init() { proto.RegisterFile("akash/audit/v1beta4/audit.proto", fileDescriptor_b75b0e7d403816e8) } + +var fileDescriptor_b75b0e7d403816e8 = []byte{ + // 580 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0xbf, 0x6f, 0xd3, 0x40, + 0x18, 0xcd, 0x39, 0xa1, 0x24, 0x57, 0x7e, 0x28, 0x06, 0x41, 0x9a, 0x0a, 0x5f, 0xb9, 0x08, 0x88, + 0x84, 0xb0, 0x49, 0x8b, 0xa0, 0x2a, 0x53, 0x2d, 0x84, 0xc4, 0x10, 0x09, 0x99, 0x8d, 0xcd, 0x21, + 0x27, 0xd7, 0x4a, 0x9a, 0x8b, 0x7c, 0x97, 0x54, 0xd9, 0x98, 0x58, 0x81, 0x8d, 0xb1, 0x33, 0x13, + 0x2b, 0xac, 0x0c, 0x74, 0xec, 0xc8, 0x74, 0xa0, 0x64, 0x41, 0x1e, 0xfd, 0x17, 0xa0, 0xdc, 0xd9, + 0x71, 0x22, 0x6a, 0x44, 0xa6, 0x0a, 0xa9, 0x9b, 0xbf, 0xf7, 0xfd, 0xb8, 0xf7, 0xbd, 0x7b, 0x27, + 0x43, 0xe4, 0x76, 0x5c, 0xb6, 0x67, 0xb9, 0x83, 0xb6, 0xcf, 0xad, 0x61, 0xa3, 0x45, 0xb8, 0xfb, + 0x40, 0x45, 0x66, 0x3f, 0xa0, 0x9c, 0xea, 0x57, 0x64, 0x81, 0xa9, 0xa0, 0xb8, 0xa0, 0x7a, 0xd5, + 0xa3, 0x1e, 0x95, 0x79, 0x6b, 0xfa, 0xa5, 0x4a, 0xab, 0x75, 0x35, 0xab, 0xe5, 0x32, 0x62, 0xb9, + 0x9c, 0x07, 0x7e, 0x6b, 0xc0, 0x09, 0xb3, 0x86, 0x8d, 0x34, 0x52, 0x95, 0xf8, 0x93, 0x06, 0x8b, + 0xcf, 0x03, 0x3a, 0xf4, 0xdb, 0x24, 0xd0, 0x2d, 0x78, 0x8e, 0x1e, 0xf4, 0x48, 0x50, 0x01, 0x1b, + 0xa0, 0x5e, 0xb2, 0xd7, 0x42, 0x81, 0x14, 0x10, 0x09, 0x74, 0x61, 0xe4, 0xee, 0x77, 0x77, 0xb0, + 0x0c, 0xb1, 0xa3, 0x60, 0xfd, 0x11, 0x3c, 0x2f, 0xe9, 0xd0, 0xa0, 0xa2, 0xc9, 0x96, 0x1b, 0xa1, + 0x40, 0x09, 0x14, 0x09, 0x74, 0x49, 0x35, 0xc5, 0x00, 0x76, 0x92, 0x94, 0xfe, 0x19, 0x40, 0x98, + 0x12, 0xab, 0x14, 0x36, 0xf2, 0xf5, 0xd5, 0xcd, 0x9a, 0xa9, 0x36, 0x9c, 0xd2, 0x36, 0xd3, 0xac, + 0x39, 0x6c, 0x98, 0xbb, 0x49, 0x64, 0xf3, 0x23, 0x81, 0x72, 0xa1, 0x40, 0x73, 0xed, 0x91, 
0x40, + 0xe5, 0xf8, 0xa0, 0x19, 0x86, 0x3f, 0xfe, 0x40, 0xcf, 0x3c, 0x9f, 0xef, 0x0d, 0x5a, 0xe6, 0x2b, + 0xba, 0x6f, 0xc9, 0xc9, 0xf7, 0x7a, 0x84, 0x1f, 0xd0, 0xa0, 0x13, 0x47, 0x6e, 0xdf, 0xb7, 0x3c, + 0x6a, 0xf5, 0x68, 0x9b, 0x58, 0x7c, 0xd4, 0x27, 0x6c, 0x51, 0xab, 0xf4, 0x50, 0xe6, 0xcc, 0x9d, + 0x86, 0xbf, 0x6a, 0xb0, 0xbc, 0x3b, 0xdd, 0x83, 0xb4, 0xd3, 0x8a, 0xd3, 0xd3, 0x2e, 0xff, 0x1f, + 0x69, 0xb7, 0x53, 0xfc, 0x70, 0x88, 0xc0, 0xaf, 0x43, 0x94, 0xc3, 0x6f, 0x01, 0xd4, 0xe7, 0x8a, + 0x08, 0xeb, 0xd3, 0x1e, 0x23, 0x3a, 0x5d, 0xd8, 0x0d, 0xc8, 0xdd, 0x6e, 0x9b, 0x27, 0x38, 0xdf, + 0xfc, 0xe3, 0x0a, 0xec, 0x3b, 0xff, 0xb8, 0x5e, 0x36, 0xa3, 0x72, 0x3a, 0xed, 0xa9, 0xdf, 0xe5, + 0x24, 0x60, 0xfa, 0x63, 0x58, 0x8c, 0x85, 0x57, 0x74, 0x4a, 0x36, 0x0a, 0x05, 0x9a, 0x61, 0x91, + 0x40, 0x97, 0x17, 0x2e, 0x8a, 0x61, 0x67, 0x96, 0xd4, 0xb7, 0xe0, 0x8a, 0xbc, 0x6c, 0x56, 0xd1, + 0x64, 0xeb, 0x7a, 0x28, 0x50, 0x8c, 0x44, 0x02, 0x5d, 0x9c, 0xb3, 0x05, 0xc3, 0x4e, 0x9c, 0x98, + 0x63, 0xf4, 0x4d, 0x83, 0x6b, 0x4d, 0xe6, 0xbd, 0xf0, 0xbd, 0x5e, 0xf2, 0x46, 0xcf, 0x1c, 0xb7, + 0xa4, 0xe3, 0x0a, 0x52, 0xc9, 0x1a, 0xbc, 0x99, 0x29, 0x64, 0xe2, 0x3d, 0xfc, 0x05, 0xc0, 0xf5, + 0x26, 0xf3, 0x9e, 0x90, 0x2e, 0xe1, 0xe4, 0x54, 0x05, 0xbf, 0x0b, 0x0b, 0x1d, 0x32, 0x52, 0x4a, + 0x97, 0xec, 0xeb, 0xa1, 0x40, 0x32, 0x8e, 0x04, 0x5a, 0x55, 0x2d, 0xd3, 0x08, 0x3b, 0x12, 0x8c, + 0x37, 0xbc, 0x05, 0x6b, 0x7f, 0xe1, 0x9e, 0xec, 0xb8, 0xf9, 0x5e, 0x83, 0xf9, 0x26, 0xf3, 0xf4, + 0xd7, 0x00, 0x5e, 0xcb, 0xf0, 0x95, 0x79, 0xe2, 0x73, 0xcb, 0x94, 0xaf, 0xfa, 0x70, 0xb9, 0xfa, + 0xd9, 0x53, 0x7f, 0x03, 0x60, 0x25, 0x53, 0xeb, 0xfb, 0x59, 0x43, 0xb3, 0x3a, 0xaa, 0xdb, 0xcb, + 0x76, 0x24, 0x44, 0x6c, 0xe7, 0x68, 0x6c, 0x80, 0xe3, 0xb1, 0x01, 0x7e, 0x8e, 0x0d, 0xf0, 0x6e, + 0x62, 0xe4, 0x8e, 0x27, 0x46, 0xee, 0xfb, 0xc4, 0xc8, 0xbd, 0xdc, 0x5e, 0xc2, 0x93, 0x0b, 0x3f, + 0xee, 0xd6, 0x8a, 0xfc, 0xbd, 0x6e, 0xfd, 0x0e, 0x00, 0x00, 0xff, 0xff, 0xf4, 0x98, 0xb4, 0x0e, + 0xd6, 0x07, 0x00, 0x00, +} + 
+// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // SignProviderAttributes defines a method that signs provider attributes + SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) + // DeleteProviderAttributes defines a method that deletes provider attributes + DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) SignProviderAttributes(ctx context.Context, in *MsgSignProviderAttributes, opts ...grpc.CallOption) (*MsgSignProviderAttributesResponse, error) { + out := new(MsgSignProviderAttributesResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1beta4.Msg/SignProviderAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DeleteProviderAttributes(ctx context.Context, in *MsgDeleteProviderAttributes, opts ...grpc.CallOption) (*MsgDeleteProviderAttributesResponse, error) { + out := new(MsgDeleteProviderAttributesResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1beta4.Msg/DeleteProviderAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. 
+type MsgServer interface { + // SignProviderAttributes defines a method that signs provider attributes + SignProviderAttributes(context.Context, *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) + // DeleteProviderAttributes defines a method that deletes provider attributes + DeleteProviderAttributes(context.Context, *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) SignProviderAttributes(ctx context.Context, req *MsgSignProviderAttributes) (*MsgSignProviderAttributesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SignProviderAttributes not implemented") +} +func (*UnimplementedMsgServer) DeleteProviderAttributes(ctx context.Context, req *MsgDeleteProviderAttributes) (*MsgDeleteProviderAttributesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteProviderAttributes not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_SignProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgSignProviderAttributes) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).SignProviderAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1beta4.Msg/SignProviderAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).SignProviderAttributes(ctx, req.(*MsgSignProviderAttributes)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DeleteProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDeleteProviderAttributes) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DeleteProviderAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1beta4.Msg/DeleteProviderAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DeleteProviderAttributes(ctx, req.(*MsgDeleteProviderAttributes)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.audit.v1beta4.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SignProviderAttributes", + Handler: _Msg_SignProviderAttributes_Handler, + }, + { + MethodName: "DeleteProviderAttributes", + Handler: _Msg_DeleteProviderAttributes_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/audit/v1beta4/audit.proto", +} + +func (m *Provider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Provider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAudit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + 
i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AuditedAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AuditedAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AuditedAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAudit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *AttributesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAudit(dAtA, i, uint64(size)) + } + i-- + 
dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *AttributesFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *AttributesFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *AttributesFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owners) > 0 { + for iNdEx := len(m.Owners) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Owners[iNdEx]) + copy(dAtA[i:], m.Owners[iNdEx]) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Owners[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.Auditors) > 0 { + for iNdEx := len(m.Auditors) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Auditors[iNdEx]) + copy(dAtA[i:], m.Auditors[iNdEx]) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditors[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *MsgSignProviderAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSignProviderAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSignProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAudit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) + 
i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgSignProviderAttributesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgSignProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgSignProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProviderAttributes) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProviderAttributes) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProviderAttributes) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Keys) > 0 { + for iNdEx := len(m.Keys) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Keys[iNdEx]) + copy(dAtA[i:], m.Keys[iNdEx]) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Keys[iNdEx]))) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintAudit(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProviderAttributesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + 
n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProviderAttributesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProviderAttributesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintAudit(dAtA []byte, offset int, v uint64) int { + offset -= sovAudit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Provider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func (m *AuditedAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func (m *AttributesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func (m *AttributesFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Auditors) > 0 { + for _, s := range m.Auditors { + l = len(s) + n += 1 + l + sovAudit(uint64(l)) + } + } + if len(m.Owners) > 0 { + for _, s := range m.Owners { + l = len(s) + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + 
+func (m *MsgSignProviderAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func (m *MsgSignProviderAttributesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDeleteProviderAttributes) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovAudit(uint64(l)) + } + if len(m.Keys) > 0 { + for _, s := range m.Keys { + l = len(s) + n += 1 + l + sovAudit(uint64(l)) + } + } + return n +} + +func (m *MsgDeleteProviderAttributesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovAudit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAudit(x uint64) (n int) { + return sovAudit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Provider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Provider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 
+ for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + 
skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AuditedAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AuditedAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AuditedAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributesResponse: wiretype end group for non-group") + } + if 
fieldNum <= 0 { + return fmt.Errorf("proto: AttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, AuditedAttributes{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *AttributesFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: AttributesFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: AttributesFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Auditors", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditors = append(m.Auditors, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owners", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owners = append(m.Owners, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSignProviderAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSignProviderAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSignProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgSignProviderAttributesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgSignProviderAttributesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgSignProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProviderAttributes) Unmarshal(dAtA []byte) error { + l := len(dAtA) + 
iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDeleteProviderAttributes: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProviderAttributes: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + 
if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Keys", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAudit + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAudit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Keys = append(m.Keys, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProviderAttributesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAudit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProviderAttributesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipAudit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAudit + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAudit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAudit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAudit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAudit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAudit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAudit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAudit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAudit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAudit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAudit = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/audit/v1beta4/codec.go b/go/node/audit/v1beta4/codec.go new file mode 100644 index 00000000..fcb6f4ad --- /dev/null +++ b/go/node/audit/v1beta4/codec.go @@ -0,0 +1,43 @@ +package v1beta4 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes 
"github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/audit module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/provider and + // defined at the application level. + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +func init() { + RegisterLegacyAminoCodec(amino) + cryptocodec.RegisterCrypto(amino) + amino.Seal() +} + +// RegisterLegacyAminoCodec register concrete types on codec +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgSignProviderAttributes{}, ModuleName+"/"+MsgTypeSignProviderAttributes, nil) + cdc.RegisterConcrete(&MsgDeleteProviderAttributes{}, ModuleName+"/"+MsgTypeDeleteProviderAttributes, nil) +} + +// RegisterInterfaces registers the x/provider interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgSignProviderAttributes{}, + &MsgDeleteProviderAttributes{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/audit/v1beta4/errors.go b/go/node/audit/v1beta4/errors.go new file mode 100644 index 00000000..17ab735f --- /dev/null +++ b/go/node/audit/v1beta4/errors.go @@ -0,0 +1,22 @@ +package v1beta4 + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + errProviderNotFound uint32 = iota + 1 + errInvalidAddress + errAttributeNotFound +) + +var ( + // ErrProviderNotFound provider not found + ErrProviderNotFound = sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") + + // ErrInvalidAddress 
invalid trusted auditor address + ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") + + // ErrAttributeNotFound invalid trusted auditor address + ErrAttributeNotFound = sdkerrors.Register(ModuleName, errAttributeNotFound, "attribute not found") +) diff --git a/go/node/audit/v1beta4/event.go b/go/node/audit/v1beta4/event.go new file mode 100644 index 00000000..f1ee63b1 --- /dev/null +++ b/go/node/audit/v1beta4/event.go @@ -0,0 +1,118 @@ +package v1beta4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/akash-network/akash-api/go/sdkutil" +) + +const ( + evActionTrustedAuditorCreated = "audit-trusted-auditor-created" + evActionTrustedAuditorDeleted = "audit-trusted-auditor-deleted" + evOwnerKey = "owner" + evAuditorKey = "auditor" +) + +// EventTrustedAuditorCreated struct +type EventTrustedAuditorCreated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + Owner sdk.Address `json:"owner"` + Auditor sdk.Address `json:"auditor"` +} + +func NewEventTrustedAuditorCreated(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorCreated { + return EventTrustedAuditorCreated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionTrustedAuditorCreated, + }, + Owner: owner, + Auditor: auditor, + } +} + +// ToSDKEvent method creates new sdk event for EventProviderCreated struct +func (ev EventTrustedAuditorCreated) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorCreated), + }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., + ) +} + +// TrustedAuditorEVAttributes returns event attributes for given Provider +func TrustedAuditorEVAttributes(owner sdk.Address, auditor sdk.Address) []sdk.Attribute { + return []sdk.Attribute{ + sdk.NewAttribute(evOwnerKey, owner.String()), + sdk.NewAttribute(evAuditorKey, 
auditor.String()), + } +} + +// ParseEVTTrustedAuditor returns provider details for given event attributes +func ParseEVTTrustedAuditor(attrs []sdk.Attribute) (sdk.Address, sdk.Address, error) { + owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) + if err != nil { + return nil, nil, err + } + + auditor, err := sdkutil.GetAccAddress(attrs, evAuditorKey) + if err != nil { + return nil, nil, err + } + + return owner, auditor, nil +} + +type EventTrustedAuditorDeleted struct { + Context sdkutil.BaseModuleEvent `json:"context"` + Owner sdk.Address `json:"owner"` + Auditor sdk.Address `json:"auditor"` +} + +func NewEventTrustedAuditorDeleted(owner sdk.Address, auditor sdk.Address) EventTrustedAuditorDeleted { + return EventTrustedAuditorDeleted{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionTrustedAuditorDeleted, + }, + Owner: owner, + Auditor: auditor, + } +} + +// ToSDKEvent method creates new sdk event for EventProviderCreated struct +func (ev EventTrustedAuditorDeleted) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionTrustedAuditorDeleted), + }, TrustedAuditorEVAttributes(ev.Owner, ev.Auditor)...)..., + ) +} + +// ParseEvent parses event and returns details of event and error if occurred +func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { + if ev.Type != sdkutil.EventTypeMessage { + return nil, sdkutil.ErrUnknownType + } + if ev.Module != ModuleName { + return nil, sdkutil.ErrUnknownModule + } + switch ev.Action { + case evActionTrustedAuditorCreated: + owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventTrustedAuditorCreated(owner, auditor), nil + case evActionTrustedAuditorDeleted: + owner, auditor, err := ParseEVTTrustedAuditor(ev.Attributes) + if err != nil { + return nil, err + } + return 
NewEventTrustedAuditorDeleted(owner, auditor), nil + default: + return nil, sdkutil.ErrUnknownAction + } +} diff --git a/go/node/audit/v1beta4/genesis.pb.go b/go/node/audit/v1beta4/genesis.pb.go new file mode 100644 index 00000000..e9e73aec --- /dev/null +++ b/go/node/audit/v1beta4/genesis.pb.go @@ -0,0 +1,332 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1beta4/genesis.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the basic genesis state used by audit module +type GenesisState struct { + Attributes []AuditedAttributes `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes" yaml:"attributes"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_8765efef2ccff99f, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) 
{ + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetAttributes() []AuditedAttributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "akash.audit.v1beta4.GenesisState") +} + +func init() { proto.RegisterFile("akash/audit/v1beta4/genesis.proto", fileDescriptor_8765efef2ccff99f) } + +var fileDescriptor_8765efef2ccff99f = []byte{ + // 235 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x2c, 0x4d, 0xc9, 0x2c, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, 0xd1, + 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x06, + 0x2b, 0xd1, 0x03, 0x2b, 0xd1, 0x83, 0x2a, 0x91, 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xcb, 0xeb, + 0x83, 0x58, 0x10, 0xa5, 0x52, 0xf2, 0xd8, 0x4c, 0x83, 0x68, 0x04, 0x2b, 0x50, 0xaa, 0xe7, 0xe2, + 0x71, 0x87, 0x18, 0x1e, 0x5c, 0x92, 0x58, 0x92, 0x2a, 0x94, 0xcf, 0xc5, 0x95, 0x58, 0x52, 0x52, + 0x94, 0x99, 0x54, 0x5a, 0x92, 0x5a, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, 0xa6, 0x87, + 0xc5, 0x42, 0x3d, 0x47, 0x10, 0x2f, 0x35, 0xc5, 0x11, 0xae, 0xda, 0x49, 0xfd, 0xc4, 0x3d, 0x79, + 0x86, 0x57, 0xf7, 0xe4, 0x91, 0x4c, 0xf8, 0x74, 0x4f, 0x5e, 0xb0, 0x32, 0x31, 0x37, 0xc7, 0x4a, + 0x09, 0x21, 0xa6, 0x14, 0x84, 0xa4, 0xc0, 0x29, 0xe8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, + 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, + 0xe5, 0x18, 0xa2, 0x2c, 0xd2, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, + 0x0e, 0xd0, 0xcd, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0x86, 0xf2, 0x12, 0x0b, 0x32, 0xf5, 0xd3, + 0xf3, 0xf5, 0xf3, 0xf2, 
0x53, 0x52, 0x51, 0x3d, 0x98, 0xc4, 0x06, 0xf6, 0x9b, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0xb9, 0x56, 0xa5, 0xbe, 0x4c, 0x01, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire 
>> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, AuditedAttributes{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { 
+ break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/audit/v1beta4/key.go b/go/node/audit/v1beta4/key.go new file mode 100644 index 00000000..4d1b760d --- /dev/null +++ b/go/node/audit/v1beta4/key.go @@ -0,0 +1,16 @@ +package v1beta4 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "audit" + + // StoreKey is the store key string for provider + StoreKey = ModuleName + + // RouterKey is the message route for provider + RouterKey = ModuleName +) + +func PrefixProviderID() []byte { + return []byte{0x01} +} diff --git a/go/node/audit/v1beta4/msgs.go b/go/node/audit/v1beta4/msgs.go new file mode 100644 index 00000000..2f7f7645 --- /dev/null +++ b/go/node/audit/v1beta4/msgs.go @@ -0,0 +1,94 @@ +package v1beta4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + MsgTypeSignProviderAttributes = "audit-sign-provider-attributes" + MsgTypeDeleteProviderAttributes = "audit-delete-provider-attributes" +) + +var ( + _ sdk.Msg = 
&MsgSignProviderAttributes{} + _ sdk.Msg = &MsgDeleteProviderAttributes{} +) + +// ====MsgSignProviderAttributes==== +// Route implements the sdk.Msg interface +func (m MsgSignProviderAttributes) Route() string { + return RouterKey +} + +// Type implements the sdk.Msg interface +func (m MsgSignProviderAttributes) Type() string { + return MsgTypeSignProviderAttributes +} + +// ValidateBasic does basic validation +func (m MsgSignProviderAttributes) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") + } + + if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") + } + + return nil +} + +// GetSignBytes encodes the message for signing +func (m MsgSignProviderAttributes) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) +} + +// GetSigners defines whose signature is required +func (m MsgSignProviderAttributes) GetSigners() []sdk.AccAddress { + auditor, err := sdk.AccAddressFromBech32(m.Auditor) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{auditor} +} + +// ====MsgRevokeProviderAttributes==== +// Route implements the sdk.Msg interface +func (m MsgDeleteProviderAttributes) Route() string { + return RouterKey +} + +// Type implements the sdk.Msg interface +func (m MsgDeleteProviderAttributes) Type() string { + return MsgTypeDeleteProviderAttributes +} + +// ValidateBasic does basic validation +func (m MsgDeleteProviderAttributes) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(m.Owner); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Owner Address") + } + + if _, err := sdk.AccAddressFromBech32(m.Auditor); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Auditor Address") + } + + return nil +} + +// GetSignBytes 
encodes the message for signing +func (m MsgDeleteProviderAttributes) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&m)) +} + +// GetSigners defines whose signature is required +func (m MsgDeleteProviderAttributes) GetSigners() []sdk.AccAddress { + auditor, err := sdk.AccAddressFromBech32(m.Auditor) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{auditor} +} diff --git a/go/node/audit/v1beta4/query.pb.go b/go/node/audit/v1beta4/query.pb.go new file mode 100644 index 00000000..6eac5200 --- /dev/null +++ b/go/node/audit/v1beta4/query.pb.go @@ -0,0 +1,1718 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/audit/v1beta4/query.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryProvidersResponse is response type for the Query/Providers RPC method +type QueryProvidersResponse struct { + Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } +func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryProvidersResponse) ProtoMessage() {} +func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_70c8ef9c680a758f, []int{0} +} +func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProvidersResponse.Merge(m, src) +} +func (m *QueryProvidersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryProvidersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo + +func (m *QueryProvidersResponse) GetProviders() Providers { + if m != nil { + return m.Providers + } + return nil +} + +func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +type QueryProviderRequest struct { + Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" 
json:"auditor,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *QueryProviderRequest) Reset() { *m = QueryProviderRequest{} } +func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderRequest) ProtoMessage() {} +func (*QueryProviderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_70c8ef9c680a758f, []int{1} +} +func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderRequest.Merge(m, src) +} +func (m *QueryProviderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo + +func (m *QueryProviderRequest) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *QueryProviderRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method +type QueryAllProvidersAttributesRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAllProvidersAttributesRequest) Reset() { *m = QueryAllProvidersAttributesRequest{} } +func (m *QueryAllProvidersAttributesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAllProvidersAttributesRequest) ProtoMessage() {} +func (*QueryAllProvidersAttributesRequest) 
Descriptor() ([]byte, []int) { + return fileDescriptor_70c8ef9c680a758f, []int{2} +} +func (m *QueryAllProvidersAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAllProvidersAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAllProvidersAttributesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAllProvidersAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAllProvidersAttributesRequest.Merge(m, src) +} +func (m *QueryAllProvidersAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAllProvidersAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAllProvidersAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAllProvidersAttributesRequest proto.InternalMessageInfo + +func (m *QueryAllProvidersAttributesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderAttributesRequest is request type for the Query/Provider RPC method +type QueryProviderAttributesRequest struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProviderAttributesRequest) Reset() { *m = QueryProviderAttributesRequest{} } +func (m *QueryProviderAttributesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderAttributesRequest) ProtoMessage() {} +func (*QueryProviderAttributesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_70c8ef9c680a758f, []int{3} +} +func (m *QueryProviderAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderAttributesRequest) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderAttributesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderAttributesRequest.Merge(m, src) +} +func (m *QueryProviderAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderAttributesRequest proto.InternalMessageInfo + +func (m *QueryProviderAttributesRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *QueryProviderAttributesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderAuditorRequest is request type for the Query/Providers RPC method +type QueryProviderAuditorRequest struct { + Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` + Owner string `protobuf:"bytes,2,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *QueryProviderAuditorRequest) Reset() { *m = QueryProviderAuditorRequest{} } +func (m *QueryProviderAuditorRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderAuditorRequest) ProtoMessage() {} +func (*QueryProviderAuditorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_70c8ef9c680a758f, []int{4} +} +func (m *QueryProviderAuditorRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderAuditorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderAuditorRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + 
if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderAuditorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderAuditorRequest.Merge(m, src) +} +func (m *QueryProviderAuditorRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderAuditorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderAuditorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderAuditorRequest proto.InternalMessageInfo + +func (m *QueryProviderAuditorRequest) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *QueryProviderAuditorRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method +type QueryAuditorAttributesRequest struct { + Auditor string `protobuf:"bytes,1,opt,name=auditor,proto3" json:"auditor,omitempty"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryAuditorAttributesRequest) Reset() { *m = QueryAuditorAttributesRequest{} } +func (m *QueryAuditorAttributesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryAuditorAttributesRequest) ProtoMessage() {} +func (*QueryAuditorAttributesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_70c8ef9c680a758f, []int{5} +} +func (m *QueryAuditorAttributesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryAuditorAttributesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryAuditorAttributesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryAuditorAttributesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryAuditorAttributesRequest.Merge(m, src) +} +func (m 
*QueryAuditorAttributesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryAuditorAttributesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryAuditorAttributesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryAuditorAttributesRequest proto.InternalMessageInfo + +func (m *QueryAuditorAttributesRequest) GetAuditor() string { + if m != nil { + return m.Auditor + } + return "" +} + +func (m *QueryAuditorAttributesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +func init() { + proto.RegisterType((*QueryProvidersResponse)(nil), "akash.audit.v1beta4.QueryProvidersResponse") + proto.RegisterType((*QueryProviderRequest)(nil), "akash.audit.v1beta4.QueryProviderRequest") + proto.RegisterType((*QueryAllProvidersAttributesRequest)(nil), "akash.audit.v1beta4.QueryAllProvidersAttributesRequest") + proto.RegisterType((*QueryProviderAttributesRequest)(nil), "akash.audit.v1beta4.QueryProviderAttributesRequest") + proto.RegisterType((*QueryProviderAuditorRequest)(nil), "akash.audit.v1beta4.QueryProviderAuditorRequest") + proto.RegisterType((*QueryAuditorAttributesRequest)(nil), "akash.audit.v1beta4.QueryAuditorAttributesRequest") +} + +func init() { proto.RegisterFile("akash/audit/v1beta4/query.proto", fileDescriptor_70c8ef9c680a758f) } + +var fileDescriptor_70c8ef9c680a758f = []byte{ + // 567 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x95, 0x41, 0x6f, 0xd3, 0x30, + 0x14, 0xc7, 0xeb, 0xa2, 0x82, 0xea, 0x9d, 0x66, 0xaa, 0xa9, 0x14, 0x96, 0x56, 0x3d, 0x40, 0x35, + 0xc0, 0xde, 0xb2, 0x89, 0x01, 0x17, 0xb4, 0x1d, 0xc6, 0x09, 0x34, 0x72, 0xe4, 0xe6, 0xae, 0x56, + 0x16, 0xad, 0xcb, 0xcb, 0x62, 0x67, 0x13, 0x42, 0x43, 0x82, 0x4f, 0x80, 0xc4, 0xb7, 0x40, 0x5c, + 0xe0, 0xca, 0x17, 0xd8, 0x71, 0x12, 0x17, 0x4e, 0x03, 0xb5, 0x7c, 0x10, 0x14, 0x3b, 0x69, 0x9b, + 0xa5, 0xa5, 0xdd, 0xd8, 0xad, 0xae, 0xff, 0xef, 0xfd, 0x7f, 0xef, 0xf9, 0xd9, 0xc1, 0x75, 
0xbe, + 0xc7, 0xe5, 0x2e, 0xe3, 0x51, 0xc7, 0x53, 0xec, 0x70, 0xa5, 0x2d, 0x14, 0x5f, 0x63, 0x07, 0x91, + 0x08, 0xdf, 0xd0, 0x20, 0x04, 0x05, 0xe4, 0xa6, 0x16, 0x50, 0x2d, 0xa0, 0x89, 0xa0, 0x56, 0x71, + 0xc1, 0x05, 0xbd, 0xcf, 0xe2, 0x5f, 0x46, 0x5a, 0xbb, 0xe3, 0x02, 0xb8, 0x5d, 0xc1, 0x78, 0xe0, + 0x31, 0xee, 0xfb, 0xa0, 0xb8, 0xf2, 0xc0, 0x97, 0xc9, 0xee, 0xd2, 0x0e, 0xc8, 0x7d, 0x90, 0xac, + 0xcd, 0xa5, 0x30, 0x0e, 0x89, 0xdf, 0x0a, 0x0b, 0xb8, 0xeb, 0xf9, 0x5a, 0x9c, 0x68, 0xc7, 0x52, + 0x19, 0x04, 0x2d, 0x68, 0x7e, 0x45, 0x78, 0xe1, 0x55, 0x9c, 0x63, 0x3b, 0x84, 0x43, 0xaf, 0x23, + 0x42, 0xe9, 0x08, 0x19, 0x80, 0x2f, 0x05, 0x79, 0x89, 0xcb, 0x41, 0xfa, 0x67, 0x15, 0x35, 0xae, + 0xb5, 0xe6, 0xec, 0x45, 0x3a, 0xa6, 0x08, 0x9a, 0x86, 0x6e, 0xce, 0x9f, 0x9c, 0xd5, 0x0b, 0x9f, + 0x7f, 0xd5, 0xcb, 0xc3, 0x64, 0xc3, 0x14, 0xe4, 0x39, 0xc6, 0x43, 0xbe, 0x6a, 0xb1, 0x81, 0x5a, + 0x73, 0xf6, 0x3d, 0x6a, 0x8a, 0xa1, 0x71, 0x31, 0xd4, 0xb4, 0x2b, 0x29, 0x86, 0x6e, 0x73, 0x57, + 0xa4, 0x30, 0xce, 0x48, 0x68, 0x73, 0x0b, 0x57, 0x32, 0xc8, 0x8e, 0x38, 0x88, 0x84, 0x54, 0xa4, + 0x8a, 0x6f, 0x68, 0x30, 0x08, 0xab, 0xa8, 0x81, 0x5a, 0x65, 0x27, 0x5d, 0x92, 0x0a, 0x2e, 0xc1, + 0x91, 0x2f, 0x42, 0xed, 0x5a, 0x76, 0xcc, 0xa2, 0xd9, 0xc5, 0x4d, 0x9d, 0x67, 0xa3, 0xdb, 0x1d, + 0x00, 0x6f, 0x28, 0x15, 0x7a, 0xed, 0x48, 0x09, 0x99, 0x66, 0xdd, 0xca, 0x60, 0x23, 0x8d, 0x7d, + 0x77, 0x2a, 0xb6, 0x8e, 0xcd, 0x50, 0xbf, 0xc3, 0x56, 0x86, 0x3a, 0xef, 0x34, 0xa0, 0x44, 0x23, + 0x94, 0xe7, 0xfc, 0x8b, 0x97, 0xf6, 0x7f, 0x81, 0x6f, 0x67, 0xfd, 0x4d, 0x6f, 0x2e, 0xdb, 0xbc, + 0xf7, 0x08, 0x2f, 0x9a, 0xee, 0x19, 0x59, 0xbe, 0x9c, 0xc9, 0x19, 0xaf, 0xa8, 0x24, 0xfb, 0xac, + 0x84, 0x4b, 0x9a, 0x81, 0x7c, 0x43, 0x78, 0x61, 0xfc, 0x31, 0x92, 0xf5, 0xb1, 0x33, 0x3b, 0xfd, + 0xe0, 0x6b, 0xf7, 0x27, 0x07, 0xe6, 0x2e, 0x4b, 0xd3, 0xfe, 0xf0, 0xe3, 0xcf, 0xa7, 0xe2, 0x03, + 0xb2, 0xc4, 0x26, 0xde, 0x38, 0xc6, 0x07, 0x16, 0xac, 0xeb, 0x49, 0x15, 0x43, 0x93, 0xfc, 0x34, + 0x90, 0xd5, 0xe9, 0xbe, 0xff, 
0x09, 0xfb, 0x54, 0xc3, 0xae, 0x11, 0x7b, 0x36, 0xd8, 0xb7, 0xfa, + 0xc4, 0x8f, 0x0d, 0xf4, 0x77, 0x84, 0x6f, 0x9d, 0x1b, 0xa1, 0x11, 0xf6, 0xe5, 0x19, 0xd8, 0x33, + 0x73, 0x77, 0x31, 0xf0, 0x67, 0x1a, 0xfc, 0x09, 0x59, 0x9f, 0x11, 0x3c, 0x19, 0xb8, 0xe3, 0xb4, + 0x04, 0xf2, 0x05, 0xe1, 0xf9, 0x3c, 0xb5, 0xfd, 0x8f, 0x11, 0x99, 0x30, 0xdd, 0x17, 0xe3, 0x7e, + 0xa4, 0xb9, 0x97, 0x09, 0x4d, 0xb8, 0xd3, 0x47, 0x31, 0x8b, 0x0e, 0xe1, 0x08, 0x71, 0xdc, 0xec, + 0x4d, 0xe7, 0xa4, 0x67, 0xa1, 0xd3, 0x9e, 0x85, 0x7e, 0xf7, 0x2c, 0xf4, 0xb1, 0x6f, 0x15, 0x4e, + 0xfb, 0x56, 0xe1, 0x67, 0xdf, 0x2a, 0xbc, 0x7e, 0xec, 0x7a, 0x6a, 0x37, 0x6a, 0xd3, 0x1d, 0xd8, + 0x37, 0x39, 0x1f, 0xfa, 0x42, 0x1d, 0x41, 0xb8, 0x97, 0xac, 0xe2, 0x4f, 0x87, 0x0b, 0xcc, 0x87, + 0x8e, 0xc8, 0x76, 0xa9, 0x7d, 0x5d, 0x3f, 0xfc, 0xab, 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0x36, + 0xf3, 0x6e, 0x52, 0xb1, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type QueryClient interface { + // AllProvidersAttributes queries all providers + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // ProviderAttributes queries all provider signed attributes + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // ProviderAuditorAttributes queries provider signed attributes by specific auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // AuditorAttributes queries all providers signed by this auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) AllProvidersAttributes(ctx context.Context, in *QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1beta4.Query/AllProvidersAttributes", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ProviderAttributes(ctx context.Context, in *QueryProviderAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1beta4.Query/ProviderAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) ProviderAuditorAttributes(ctx context.Context, in *QueryProviderAuditorRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1beta4.Query/ProviderAuditorAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) AuditorAttributes(ctx context.Context, in *QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.audit.v1beta4.Query/AuditorAttributes", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. 
+type QueryServer interface { + // AllProvidersAttributes queries all providers + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AllProvidersAttributes(context.Context, *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) + // ProviderAttributes queries all provider signed attributes + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAttributes(context.Context, *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) + // ProviderAuditorAttributes queries provider signed attributes by specific auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + ProviderAuditorAttributes(context.Context, *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) + // AuditorAttributes queries all providers signed by this auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + AuditorAttributes(context.Context, *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) AllProvidersAttributes(ctx context.Context, req *QueryAllProvidersAttributesRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AllProvidersAttributes not implemented") +} +func (*UnimplementedQueryServer) ProviderAttributes(ctx context.Context, req *QueryProviderAttributesRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProviderAttributes not implemented") +} +func (*UnimplementedQueryServer) ProviderAuditorAttributes(ctx context.Context, req *QueryProviderAuditorRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ProviderAuditorAttributes not implemented") +} +func (*UnimplementedQueryServer) AuditorAttributes(ctx context.Context, req *QueryAuditorAttributesRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method AuditorAttributes not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_AllProvidersAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAllProvidersAttributesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AllProvidersAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1beta4.Query/AllProvidersAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AllProvidersAttributes(ctx, req.(*QueryAllProvidersAttributesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ProviderAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProviderAttributesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ProviderAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1beta4.Query/ProviderAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ProviderAttributes(ctx, req.(*QueryProviderAttributesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_ProviderAuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProviderAuditorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).ProviderAuditorAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1beta4.Query/ProviderAuditorAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).ProviderAuditorAttributes(ctx, req.(*QueryProviderAuditorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_AuditorAttributes_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryAuditorAttributesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).AuditorAttributes(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.audit.v1beta4.Query/AuditorAttributes", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).AuditorAttributes(ctx, req.(*QueryAuditorAttributesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var 
_Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.audit.v1beta4.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "AllProvidersAttributes", + Handler: _Query_AllProvidersAttributes_Handler, + }, + { + MethodName: "ProviderAttributes", + Handler: _Query_ProviderAttributes_Handler, + }, + { + MethodName: "ProviderAuditorAttributes", + Handler: _Query_ProviderAuditorAttributes_Handler, + }, + { + MethodName: "AuditorAttributes", + Handler: _Query_AuditorAttributes_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/audit/v1beta4/query.proto", +} + +func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAllProvidersAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAllProvidersAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAllProvidersAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, 
uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderAuditorRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderAuditorRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderAuditorRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0x12 + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryAuditorAttributesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryAuditorAttributesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryAuditorAttributesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Auditor) > 0 { + i -= len(m.Auditor) + copy(dAtA[i:], m.Auditor) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Auditor))) + i-- + dAtA[i] = 0xa + } + 
return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryProvidersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAllProvidersAttributesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderAttributesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderAuditorRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryAuditorAttributesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Auditor) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} 
+func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProvidersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, Provider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + 
if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = 
postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAllProvidersAttributesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAllProvidersAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderAttributesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderAttributesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderAuditorRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderAuditorRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderAuditorRequest: illegal 
tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryAuditorAttributesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryAuditorAttributesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryAuditorAttributesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Auditor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Auditor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/audit/v1beta4/query.pb.gw.go b/go/node/audit/v1beta4/query.pb.gw.go new file mode 100644 index 00000000..0fdbf37c --- /dev/null 
+++ b/go/node/audit/v1beta4/query.pb.gw.go @@ -0,0 +1,532 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/audit/v1beta4/query.proto + +/* +Package v1beta4 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1beta4 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_AllProvidersAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAllProvidersAttributesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AllProvidersAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AllProvidersAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, 
runtime.ServerMetadata, error) { + var protoReq QueryAllProvidersAttributesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AllProvidersAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AllProvidersAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_ProviderAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"owner": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.ProviderAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ProviderAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams 
map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_ProviderAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.ProviderAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAuditorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := 
client.ProviderAuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_ProviderAuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderAuditorRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := server.ProviderAuditorAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_AuditorAttributes_0 = &utilities.DoubleArray{Encoding: map[string]int{"auditor": 0}, Base: []int{1, 1, 0}, Check: []int{0, 1, 2}} +) + +func request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAuditorAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != 
nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.AuditorAttributes(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_AuditorAttributes_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryAuditorAttributesRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["auditor"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "auditor") + } + + protoReq.Auditor, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "auditor", err) + } + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_AuditorAttributes_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.AuditorAttributes(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ProviderAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_AuditorAttributes_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_AllProvidersAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AllProvidersAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AllProvidersAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_ProviderAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ProviderAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_ProviderAuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_ProviderAuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_ProviderAuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_AuditorAttributes_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_AuditorAttributes_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_AuditorAttributes_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_AllProvidersAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 2, 4}, []string{"akash", "audit", "v1beta4", "attributes", "list"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ProviderAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"akash", "audit", "v1beta4", "attributes", "owner", "list"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_ProviderAuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 1, 2, 3, 1, 0, 4, 1, 5, 4, 1, 0, 4, 1, 5, 5}, []string{"akash", "audit", "v1beta4", "attributes", "auditor", "owner"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_AuditorAttributes_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 3, 2, 4}, []string{"akash", "provider", "v1beta4", "auditor", "list"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_AllProvidersAttributes_0 = runtime.ForwardResponseMessage + + forward_Query_ProviderAttributes_0 = runtime.ForwardResponseMessage + + 
forward_Query_ProviderAuditorAttributes_0 = runtime.ForwardResponseMessage + + forward_Query_AuditorAttributes_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/audit/v1beta4/types.go b/go/node/audit/v1beta4/types.go new file mode 100644 index 00000000..818fabdb --- /dev/null +++ b/go/node/audit/v1beta4/types.go @@ -0,0 +1,33 @@ +package v1beta4 + +import ( + "bytes" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ProviderID struct { + Owner sdk.Address + Auditor sdk.Address +} + +// Providers is the collection of Provider +type Providers []Provider + +// String implements the Stringer interface for a Providers object. +func (obj Providers) String() string { + var buf bytes.Buffer + + const sep = "\n\n" + + for _, p := range obj { + buf.WriteString(p.String()) + buf.WriteString(sep) + } + + if len(obj) > 0 { + buf.Truncate(buf.Len() - len(sep)) + } + + return buf.String() +} diff --git a/go/node/client/client.go b/go/node/client/client.go index 7a232ea1..fa8c8f99 100644 --- a/go/node/client/client.go +++ b/go/node/client/client.go @@ -4,11 +4,13 @@ import ( "context" "errors" - sdkclient "github.com/cosmos/cosmos-sdk/client" tmjclient "github.com/tendermint/tendermint/rpc/jsonrpc/client" + sdkclient "github.com/cosmos/cosmos-sdk/client" + cltypes "github.com/akash-network/akash-api/go/node/client/types" "github.com/akash-network/akash-api/go/node/client/v1beta2" + "github.com/akash-network/akash-api/go/node/client/v1beta3" ) var ( @@ -17,7 +19,7 @@ var ( const ( // DefaultClientApiVersion indicates the default ApiVersion of the client. - DefaultClientApiVersion = "v1beta2" + DefaultClientAPIVersion = "v1beta2" ) // SetupFn defines a function that takes a parameter, ideally a Client or QueryClient. 
@@ -45,7 +47,7 @@ func DiscoverClient(ctx context.Context, cctx sdkclient.Context, setup SetupFn, // if client info is nil, mostly likely "akash" endpoint is not yet supported on the node // fallback to manually set version to DefaultClientApiVersion if result.ClientInfo == nil || cctx.Offline { - result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientApiVersion} + result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientAPIVersion} } var cl interface{} @@ -53,6 +55,8 @@ func DiscoverClient(ctx context.Context, cctx sdkclient.Context, setup SetupFn, switch result.ClientInfo.ApiVersion { case "v1beta2": cl, err = v1beta2.NewClient(ctx, cctx, opts...) + case "v1beta3": + cl, err = v1beta3.NewClient(ctx, cctx, opts...) default: err = ErrUnknownClientVersion } @@ -87,7 +91,7 @@ func DiscoverQueryClient(ctx context.Context, cctx sdkclient.Context, setup Setu } if result.ClientInfo == nil { - result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientApiVersion} + result.ClientInfo = &ClientInfo{ApiVersion: DefaultClientAPIVersion} } var cl interface{} @@ -95,6 +99,8 @@ func DiscoverQueryClient(ctx context.Context, cctx sdkclient.Context, setup Setu switch result.ClientInfo.ApiVersion { case "v1beta2": cl = v1beta2.NewQueryClient(cctx) + case "v1beta3": + cl = v1beta3.NewQueryClient(cctx) default: err = ErrUnknownClientVersion } diff --git a/go/node/client/testutil/v1beta3/base.go b/go/node/client/testutil/v1beta3/base.go new file mode 100644 index 00000000..c9d067b8 --- /dev/null +++ b/go/node/client/testutil/v1beta3/base.go @@ -0,0 +1,121 @@ +package testutil + +import ( + "fmt" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/libs/rand" + + atypes "github.com/akash-network/akash-api/go/node/audit/v1beta4" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + attr "github.com/akash-network/akash-api/go/node/types/attributes/v1" + + // ensure sdkutil.init() to seal SDK config for the tests + _ 
"github.com/akash-network/akash-api/go/sdkutil" +) + +// CoinDenom provides ability to create coins in test functions and +// pass them into testutil functionality. +const ( + CoinDenom = "uakt" + BechPrefix = "akash" +) + +// Name generates a random name with the given prefix +func Name(_ testing.TB, prefix string) string { + return fmt.Sprintf("%s-%v", prefix, rand.Uint64()) // nolint: gosec +} + +// Hostname generates a random hostname with a "test.com" domain +func Hostname(t testing.TB) string { + return Name(t, "hostname") + ".test.com" +} + +func ProviderHostname(t testing.TB) string { + return "https://" + Hostname(t) +} + +// Attribute generates a random sdk.Attribute +func Attribute(t testing.TB) attr.Attribute { + t.Helper() + return attr.NewStringAttribute(Name(t, "attr-key"), Name(t, "attr-value")) +} + +// Attributes generates a set of sdk.Attribute +func Attributes(t testing.TB) []attr.Attribute { + t.Helper() + count := rand.Intn(10) + 1 + + vals := make([]attr.Attribute, 0, count) + for i := 0; i < count; i++ { + vals = append(vals, Attribute(t)) + } + return vals +} + +// PlacementRequirements generates placement requirements +func PlacementRequirements(t testing.TB) atypes.PlacementRequirements { + return atypes.PlacementRequirements{ + Attributes: Attributes(t), + } +} + +func RandCPUUnits() uint { + return RandRangeUint( + dtypes.GetValidationConfig().Unit.Min.CPU, + dtypes.GetValidationConfig().Unit.Max.CPU) +} + +func RandGPUUnits() uint { + return RandRangeUint( + dtypes.GetValidationConfig().Unit.Min.GPU, + dtypes.GetValidationConfig().Unit.Max.GPU) +} + +func RandMemoryQuantity() uint64 { + return RandRangeUint64( + dtypes.GetValidationConfig().Unit.Min.Memory, + dtypes.GetValidationConfig().Unit.Max.Memory) +} + +func RandStorageQuantity() uint64 { + return RandRangeUint64( + dtypes.GetValidationConfig().Unit.Min.Storage, + dtypes.GetValidationConfig().Unit.Max.Storage) +} + +// Resources produces an attribute list for populating a 
Group's +// 'Resources' fields. +func Resources(t testing.TB) []dtypes.ResourceUnit { + t.Helper() + count := rand.Intn(10) + 1 + + vals := make(dtypes.ResourceUnits, 0, count) + for i := 0; i < count; i++ { + coin := sdk.NewDecCoin(CoinDenom, sdk.NewInt(rand.Int63n(9999)+1)) + res := dtypes.ResourceUnit{ + Resources: types.Resources{ + ID: uint32(i) + 1, + CPU: &types.CPU{ + Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.CPU)), + }, + GPU: &types.GPU{ + Units: types.NewResourceValue(uint64(dtypes.GetValidationConfig().Unit.Min.GPU)), + }, + Memory: &types.Memory{ + Quantity: types.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Memory), + }, + Storage: types.Volumes{ + types.Storage{ + Quantity: types.NewResourceValue(dtypes.GetValidationConfig().Unit.Min.Storage), + }, + }, + }, + Count: 1, + Price: coin, + } + vals = append(vals, res) + } + return vals +} diff --git a/go/node/client/testutil/v1beta3/cert.go b/go/node/client/testutil/v1beta3/cert.go new file mode 100644 index 00000000..f8b7e7ba --- /dev/null +++ b/go/node/client/testutil/v1beta3/cert.go @@ -0,0 +1,211 @@ +package testutil + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rand" + "crypto/tls" + "crypto/x509" + "crypto/x509/pkix" + "encoding/pem" + "math/big" + "net" + "testing" + "time" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" + + types "github.com/akash-network/akash-api/go/node/cert/v1beta3" + certutils "github.com/akash-network/akash-api/go/node/cert/v1beta3/utils" + clientmocks "github.com/akash-network/akash-api/go/node/client/v1beta3/mocks" +) + +type TestCertificate struct { + Cert []tls.Certificate + Serial big.Int + PEM struct { + Cert []byte + Priv []byte + Pub []byte + } +} + +type certificateOption struct { + domains []string + nbf time.Time + naf time.Time + qclient *clientmocks.QueryClient +} + +type CertificateOption func(*certificateOption) + +func 
CertificateOptionDomains(domains []string) CertificateOption { + return func(opt *certificateOption) { + opt.domains = domains + } +} + +func CertificateOptionNotBefore(tm time.Time) CertificateOption { + return func(opt *certificateOption) { + opt.nbf = tm + } +} + +func CertificateOptionNotAfter(tm time.Time) CertificateOption { + return func(opt *certificateOption) { + opt.naf = tm + } +} + +func CertificateOptionMocks(val *clientmocks.QueryClient) CertificateOption { + return func(opt *certificateOption) { + opt.qclient = val + } +} + +func Certificate(t testing.TB, addr sdk.Address, opts ...CertificateOption) TestCertificate { + t.Helper() + + opt := &certificateOption{} + + for _, o := range opts { + o(opt) + } + + priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatal(err) + } + + if opt.nbf.IsZero() { + opt.nbf = time.Now() + } + + if opt.naf.IsZero() { + opt.naf = opt.nbf.Add(time.Hour * 24 * 365) + } + + extKeyUsage := []x509.ExtKeyUsage{ + x509.ExtKeyUsageClientAuth, + } + + if len(opt.domains) != 0 { + extKeyUsage = append(extKeyUsage, x509.ExtKeyUsageServerAuth) + } + + template := x509.Certificate{ + SerialNumber: new(big.Int).SetInt64(time.Now().UTC().UnixNano()), + Subject: pkix.Name{ + CommonName: addr.String(), + ExtraNames: []pkix.AttributeTypeAndValue{ + { + Type: certutils.AuthVersionOID, + Value: "v0.0.1", + }, + }, + }, + Issuer: pkix.Name{ + CommonName: addr.String(), + }, + NotBefore: opt.nbf, + NotAfter: opt.naf, + KeyUsage: x509.KeyUsageDataEncipherment | x509.KeyUsageKeyEncipherment, + ExtKeyUsage: extKeyUsage, + BasicConstraintsValid: true, + } + + var ips []net.IP + + for i := len(opt.domains) - 1; i >= 0; i-- { + if ip := net.ParseIP(opt.domains[i]); ip != nil { + ips = append(ips, ip) + opt.domains = append(opt.domains[:i], opt.domains[i+1:]...) 
+ } + } + + if len(opt.domains) != 0 || len(ips) != 0 { + template.PermittedDNSDomainsCritical = true + template.PermittedDNSDomains = opt.domains + template.DNSNames = opt.domains + template.IPAddresses = ips + } + + var certDer []byte + if certDer, err = x509.CreateCertificate(rand.Reader, &template, &template, priv.Public(), priv); err != nil { + t.Fatal(err) + } + + var keyDer []byte + if keyDer, err = x509.MarshalPKCS8PrivateKey(priv); err != nil { + t.Fatal(err) + } + + var pubKeyDer []byte + if pubKeyDer, err = x509.MarshalPKIXPublicKey(priv.Public()); err != nil { + t.Fatal(err) + } + + res := TestCertificate{ + Serial: *template.SerialNumber, + PEM: struct { + Cert []byte + Priv []byte + Pub []byte + }{ + Cert: pem.EncodeToMemory(&pem.Block{ + Type: types.PemBlkTypeCertificate, + Bytes: certDer, + }), + Priv: pem.EncodeToMemory(&pem.Block{ + Type: types.PemBlkTypeECPrivateKey, + Bytes: keyDer, + }), + Pub: pem.EncodeToMemory(&pem.Block{ + Type: types.PemBlkTypeECPublicKey, + Bytes: pubKeyDer, + }), + }, + } + + cert, err := tls.X509KeyPair(res.PEM.Cert, res.PEM.Priv) + if err != nil { + t.Fatal(err) + } + + res.Cert = append(res.Cert, cert) + + if opt.qclient != nil { + opt.qclient.On("Certificates", + mock.Anything, + &types.QueryCertificatesRequest{ + Filter: types.CertificateFilter{ + Owner: addr.String(), + Serial: res.Serial.String(), + State: "valid", + }, + }). 
+ Return(&types.QueryCertificatesResponse{ + Certificates: types.CertificatesResponse{ + types.CertificateResponse{ + Certificate: types.Certificate{ + State: types.CertificateValid, + Cert: res.PEM.Cert, + Pubkey: res.PEM.Pub, + }, + Serial: res.Serial.String(), + }, + }, + }, nil) + } + return res +} + +func CertificateRequireEqualResponse(t *testing.T, cert TestCertificate, resp types.CertificateResponse, state types.Certificate_State) { + t.Helper() + + require.Equal(t, state, resp.Certificate.State) + require.Equal(t, cert.PEM.Cert, resp.Certificate.Cert) + require.Equal(t, cert.PEM.Pub, resp.Certificate.Pubkey) +} diff --git a/go/node/client/testutil/v1beta3/channel_wait.go b/go/node/client/testutil/v1beta3/channel_wait.go new file mode 100644 index 00000000..f9ef17e4 --- /dev/null +++ b/go/node/client/testutil/v1beta3/channel_wait.go @@ -0,0 +1,71 @@ +package testutil + +import ( + "reflect" + "testing" + "time" +) + +func ChannelWaitForValueUpTo(t *testing.T, waitOn interface{}, waitFor time.Duration) interface{} { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(waitOn), + Send: reflect.Value{}, + } + + delayChan := time.After(waitFor) + + cases[1] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(delayChan), + Send: reflect.Value{}, + } + + idx, v, ok := reflect.Select(cases) + if !ok { + t.Fatal("channel has been closed") + } + if idx != 0 { + t.Fatalf("no message after waiting %v", waitFor) + } + + return v.Interface() +} + +const waitForDefault = 10 * time.Second + +func ChannelWaitForValue(t *testing.T, waitOn interface{}) interface{} { + return ChannelWaitForValueUpTo(t, waitOn, waitForDefault) +} + +func ChannelWaitForCloseUpTo(t *testing.T, waitOn interface{}, waitFor time.Duration) { + cases := make([]reflect.SelectCase, 2) + cases[0] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(waitOn), + Send: reflect.Value{}, +
} + + delayChan := time.After(waitFor) + + cases[1] = reflect.SelectCase{ + Dir: reflect.SelectRecv, + Chan: reflect.ValueOf(delayChan), + Send: reflect.Value{}, + } + + idx, v, ok := reflect.Select(cases) + if !ok { + return // Channel closed, everything OK + } + if idx != 0 { + t.Fatalf("channel not closed after waiting %v", waitFor) + } + + t.Fatalf("got unexpected message: %v", v.Interface()) +} + +func ChannelWaitForClose(t *testing.T, waitOn interface{}) { + ChannelWaitForCloseUpTo(t, waitOn, waitForDefault) +} diff --git a/go/node/client/testutil/v1beta3/deployment.go b/go/node/client/testutil/v1beta3/deployment.go new file mode 100644 index 00000000..2f11b57b --- /dev/null +++ b/go/node/client/testutil/v1beta3/deployment.go @@ -0,0 +1,61 @@ +package testutil + +import ( + "crypto/sha256" + "math/rand" + "testing" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" +) + +// sum256Seed provides a consistent sha256 value for initial Deployment.Version +const sum256Seed = "hihi" + +// DefaultDeploymentVersion provides consistent sha256 sum for initial Deployment.Version +var DefaultDeploymentVersion = sha256.Sum256([]byte(sum256Seed)) + +// Deployment generates a dtypes.Deployment in state `DeploymentActive` +func Deployment(t testing.TB) dtypes.Deployment { + t.Helper() + return dtypes.Deployment{ + DeploymentID: DeploymentID(t), + State: dtypes.DeploymentActive, + Version: DefaultDeploymentVersion[:], + } +} + +// DeploymentGroup generates a dtypes.Group in state `GroupOpen` +// with a set of random required attributes +func DeploymentGroup(t testing.TB, did dtypes.DeploymentID, gseq uint32) dtypes.Group { + t.Helper() + return dtypes.Group{ + GroupID: dtypes.MakeGroupID(did, gseq), + State: dtypes.GroupOpen, + GroupSpec: dtypes.GroupSpec{ + Name: Name(t, "dgroup"), + Requirements: PlacementRequirements(t), + Resources: Resources(t), + }, + } +} + +// GroupSpec generator +func GroupSpec(t testing.TB) dtypes.GroupSpec {
+ t.Helper() + return dtypes.GroupSpec{ + Name: Name(t, "dgroup"), + Requirements: PlacementRequirements(t), + Resources: Resources(t), + } +} + +// DeploymentGroups returns a set of deployment groups generated by DeploymentGroup +func DeploymentGroups(t testing.TB, did dtypes.DeploymentID, gseq uint32) []dtypes.Group { + t.Helper() + count := rand.Intn(5) + 5 // nolint:gosec + vals := make([]dtypes.Group, 0, count) + for i := 0; i < count; i++ { + vals = append(vals, DeploymentGroup(t, did, gseq+uint32(i))) + } + return vals +} diff --git a/go/node/client/testutil/v1beta3/ids.go b/go/node/client/testutil/v1beta3/ids.go new file mode 100644 index 00000000..8c1e9aea --- /dev/null +++ b/go/node/client/testutil/v1beta3/ids.go @@ -0,0 +1,102 @@ +package testutil + +import ( + cryptorand "crypto/rand" + "crypto/sha256" + "math/rand" + "testing" + + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/tendermint/tendermint/crypto/ed25519" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + mtypes "github.com/akash-network/akash-api/go/node/market/v1beta5" +) + +func Keyring(t testing.TB) keyring.Keyring { + t.Helper() + obj := keyring.NewInMemory() + return obj +} + +// AccAddress provides an Account's Address bytes from a ed25519 generated +// private key. 
+func AccAddress(t testing.TB) sdk.AccAddress { + t.Helper() + privKey := ed25519.GenPrivKey() + return sdk.AccAddress(privKey.PubKey().Address()) +} + +func Key(t testing.TB) ed25519.PrivKey { + t.Helper() + return ed25519.GenPrivKey() +} + +func DeploymentID(t testing.TB) dtypes.DeploymentID { + t.Helper() + return dtypes.DeploymentID{ + Owner: AccAddress(t).String(), + DSeq: uint64(rand.Uint32()), // nolint: gosec + } +} + +func DeploymentIDForAccount(t testing.TB, addr sdk.Address) dtypes.DeploymentID { + t.Helper() + return dtypes.DeploymentID{ + Owner: addr.String(), + DSeq: uint64(rand.Uint32()), // nolint: gosec + } +} + +// DeploymentVersion provides a random sha256 sum for simulating Deployments. +func DeploymentVersion(t testing.TB) []byte { + t.Helper() + src := make([]byte, 128) + _, err := cryptorand.Read(src) + if err != nil { + t.Fatal(err) + } + sum := sha256.Sum256(src) + return sum[:] +} + +func GroupID(t testing.TB) dtypes.GroupID { + t.Helper() + return dtypes.MakeGroupID(DeploymentID(t), rand.Uint32()) // nolint: gosec +} + +func GroupIDForAccount(t testing.TB, addr sdk.Address) dtypes.GroupID { + t.Helper() + return dtypes.MakeGroupID(DeploymentIDForAccount(t, addr), rand.Uint32()) // nolint: gosec +} + +func OrderID(t testing.TB) mtypes.OrderID { + t.Helper() + return mtypes.MakeOrderID(GroupID(t), rand.Uint32()) // nolint: gosec +} + +func OrderIDForAccount(t testing.TB, addr sdk.Address) mtypes.OrderID { + t.Helper() + return mtypes.MakeOrderID(GroupIDForAccount(t, addr), rand.Uint32()) // nolint: gosec +} + +func BidID(t testing.TB) mtypes.BidID { + t.Helper() + return mtypes.MakeBidID(OrderID(t), AccAddress(t)) +} + +func BidIDForAccount(t testing.TB, owner, provider sdk.Address) mtypes.BidID { + t.Helper() + return mtypes.MakeBidID(OrderIDForAccount(t, owner), provider.Bytes()) +} + +func LeaseID(t testing.TB) mtypes.LeaseID { + t.Helper() + return mtypes.MakeLeaseID(BidID(t)) +} + +func LeaseIDForAccount(t testing.TB, owner, provider 
sdk.Address) mtypes.LeaseID { + t.Helper() + return mtypes.MakeLeaseID(BidIDForAccount(t, owner, provider)) +} diff --git a/go/node/client/testutil/v1beta3/log.go b/go/node/client/testutil/v1beta3/log.go new file mode 100644 index 00000000..3935d5e0 --- /dev/null +++ b/go/node/client/testutil/v1beta3/log.go @@ -0,0 +1,27 @@ +package testutil + +import ( + "sync" + "testing" + + "github.com/tendermint/tendermint/libs/log" +) + +func Logger(t testing.TB) log.Logger { + return log.NewTMLogger(&testWriter{TB: t}) +} + +// Source: https://git.sr.ht/~samwhited/testlog/tree/b1b3e8e82fd6990e91ce9d0fbcbe69ac2d9b1f98/testlog.go +type testWriter struct { + testing.TB + lock sync.Mutex +} + +func (tw *testWriter) Write(p []byte) (int, error) { + defer tw.lock.Unlock() + tw.lock.Lock() + + tw.Helper() + tw.Logf("%s", p) + return len(p), nil +} diff --git a/go/node/client/testutil/v1beta3/sdk.go b/go/node/client/testutil/v1beta3/sdk.go new file mode 100644 index 00000000..b530b7fd --- /dev/null +++ b/go/node/client/testutil/v1beta3/sdk.go @@ -0,0 +1,43 @@ +package testutil + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +func Coin(t testing.TB) sdk.Coin { + t.Helper() + return sdk.NewCoin("testcoin", sdk.NewInt(int64(RandRangeInt(1, 1000)))) // nolint: gosec +} + +func DecCoin(t testing.TB) sdk.DecCoin { + t.Helper() + return sdk.NewDecCoin("testcoin", sdk.NewInt(int64(RandRangeInt(1, 1000)))) // nolint: gosec +} + +// AkashCoinRandom provides simple interface to the Akash sdk.Coin type. +func AkashCoinRandom(t testing.TB) sdk.Coin { + t.Helper() + amt := sdk.NewInt(int64(RandRangeInt(1, 1000))) + return sdk.NewCoin(CoinDenom, amt) +} + +// AkashCoin provides simple interface to the Akash sdk.Coin type. 
+func AkashCoin(t testing.TB, amount int64) sdk.Coin { + t.Helper() + amt := sdk.NewInt(amount) + return sdk.NewCoin(CoinDenom, amt) +} + +func AkashDecCoin(t testing.TB, amount int64) sdk.DecCoin { + t.Helper() + amt := sdk.NewInt(amount) + return sdk.NewDecCoin(CoinDenom, amt) +} + +func AkashDecCoinRandom(t testing.TB) sdk.DecCoin { + t.Helper() + amt := sdk.NewInt(int64(RandRangeInt(1, 1000))) + return sdk.NewDecCoin(CoinDenom, amt) +} diff --git a/go/node/client/testutil/v1beta3/types.go b/go/node/client/testutil/v1beta3/types.go new file mode 100644 index 00000000..887debb9 --- /dev/null +++ b/go/node/client/testutil/v1beta3/types.go @@ -0,0 +1,23 @@ +package testutil + +import ( + "math/rand" +) + +func RandRangeInt(min, max int) int { + return rand.Intn(max-min) + min // nolint: gosec +} + +func RandRangeUint(min, max uint) uint { + val := rand.Uint64() // nolint: gosec + val %= uint64(max - min) + val += uint64(min) + return uint(val) +} + +func RandRangeUint64(min, max uint64) uint64 { + val := rand.Uint64() // nolint: gosec + val %= max - min + val += min + return val +} diff --git a/go/node/client/v1beta3/client.go b/go/node/client/v1beta3/client.go new file mode 100644 index 00000000..ab533512 --- /dev/null +++ b/go/node/client/v1beta3/client.go @@ -0,0 +1,143 @@ +package v1beta3 + +import ( + "context" + "fmt" + + "github.com/gogo/protobuf/proto" + + tmrpc "github.com/tendermint/tendermint/rpc/core/types" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + sdk "github.com/cosmos/cosmos-sdk/types" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + evdtypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + paramtypes 
"github.com/cosmos/cosmos-sdk/x/params/types/proposal" + slashtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + staketypes "github.com/cosmos/cosmos-sdk/x/staking/types" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + atypes "github.com/akash-network/akash-api/go/node/audit/v1beta4" + ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3" + cltypes "github.com/akash-network/akash-api/go/node/client/types" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + mtypes "github.com/akash-network/akash-api/go/node/market/v1beta5" + ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta4" +) + +// QueryClient is the interface that exposes query modules. +// +//go:generate mockery --name QueryClient --output ./mocks +type QueryClient interface { + dtypes.QueryClient + mtypes.QueryClient + ptypes.QueryClient + atypes.QueryClient + ctypes.QueryClient + Auth() authtypes.QueryClient + Authz() authz.QueryClient + Bank() banktypes.QueryClient + Distribution() disttypes.QueryClient + Evidence() evdtypes.QueryClient + Feegrant() feegranttypes.QueryClient + Gov() govtypes.QueryClient + Mint() minttypes.QueryClient + Params() paramtypes.QueryClient + Slashing() slashtypes.QueryClient + Staking() staketypes.QueryClient + Upgrade() upgradetypes.QueryClient + + ClientContext() sdkclient.Context +} + +// TxClient is the interface that wraps the Broadcast method. +// Broadcast broadcasts a transaction. A transaction is composed of 1 or many messages. This allows several +// operations to be performed in a single transaction. +// A transaction broadcast can be configured with an arbitrary number of BroadcastOption. 
+// +//go:generate mockery --name TxClient --output ./mocks +type TxClient interface { + Broadcast(context.Context, []sdk.Msg, ...BroadcastOption) (interface{}, error) +} + +//go:generate mockery --name NodeClient --output ./mocks +type NodeClient interface { + SyncInfo(ctx context.Context) (*tmrpc.SyncInfo, error) +} + +// Client is the umbrella interface that exposes every other client's modules. +// +//go:generate mockery --name Client --output ./mocks +type Client interface { + Query() QueryClient + Tx() TxClient + Node() NodeClient + ClientContext() sdkclient.Context + PrintMessage(interface{}) error +} + +type client struct { + qclient *queryClient + tx TxClient + node *node +} + +var _ Client = (*client)(nil) + +// NewClient creates a new client. +func NewClient(ctx context.Context, cctx sdkclient.Context, opts ...cltypes.ClientOption) (Client, error) { + nd := newNode(cctx) + + cl := &client{ + qclient: newQueryClient(cctx), + node: nd, + } + + var err error + cl.tx, err = newSerialTx(ctx, cctx, nd, opts...) + if err != nil { + return nil, err + } + + return cl, nil +} + +// Query implements Client by returning the QueryClient instance of the client. +func (cl *client) Query() QueryClient { + return cl.qclient +} + +// Tx implements Client by returning the TxClient instance of the client. +func (cl *client) Tx() TxClient { + return cl.tx +} + +// Node implements Client by returning the NodeClient instance of the client. +func (cl *client) Node() NodeClient { + return cl.node +} + +// ClientContext implements Client by returning the Cosmos SDK client context instance of the client. +func (cl *client) ClientContext() sdkclient.Context { + return cl.qclient.cctx +} + +// PrintMessage implements Client by printing the raw message passed as parameter. 
+func (cl *client) PrintMessage(msg interface{}) error { + var err error + + switch m := msg.(type) { + case proto.Message: + err = cl.qclient.cctx.PrintProto(m) + case []byte: + err = cl.qclient.cctx.PrintString(fmt.Sprintf("%s\n", string(m))) + } + + return err +} diff --git a/go/node/client/v1beta3/errors.go b/go/node/client/v1beta3/errors.go new file mode 100644 index 00000000..10ebaedb --- /dev/null +++ b/go/node/client/v1beta3/errors.go @@ -0,0 +1,11 @@ +package v1beta3 + +import ( + "errors" +) + +var ( + // ErrClientNotFound is a new error with message "Client not found" + ErrClientNotFound = errors.New("client not found") + ErrNodeNotSynced = errors.New("rpc node is not catching up") +) diff --git a/go/node/client/v1beta3/mocks/client.go b/go/node/client/v1beta3/mocks/client.go new file mode 100644 index 00000000..59c1ebc4 --- /dev/null +++ b/go/node/client/v1beta3/mocks/client.go @@ -0,0 +1,269 @@ +// Code generated by mockery v2.42.0. DO NOT EDIT. + +package mocks + +import ( + client "github.com/cosmos/cosmos-sdk/client" + mock "github.com/stretchr/testify/mock" + + v1beta3 "github.com/akash-network/akash-api/go/node/client/v1beta3" +) + +// Client is an autogenerated mock type for the Client type +type Client struct { + mock.Mock +} + +type Client_Expecter struct { + mock *mock.Mock +} + +func (_m *Client) EXPECT() *Client_Expecter { + return &Client_Expecter{mock: &_m.Mock} +} + +// ClientContext provides a mock function with given fields: +func (_m *Client) ClientContext() client.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ClientContext") + } + + var r0 client.Context + if rf, ok := ret.Get(0).(func() client.Context); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.Context) + } + + return r0 +} + +// Client_ClientContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientContext' +type Client_ClientContext_Call struct { + *mock.Call +} + +// 
ClientContext is a helper method to define mock.On call +func (_e *Client_Expecter) ClientContext() *Client_ClientContext_Call { + return &Client_ClientContext_Call{Call: _e.mock.On("ClientContext")} +} + +func (_c *Client_ClientContext_Call) Run(run func()) *Client_ClientContext_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_ClientContext_Call) Return(_a0 client.Context) *Client_ClientContext_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_ClientContext_Call) RunAndReturn(run func() client.Context) *Client_ClientContext_Call { + _c.Call.Return(run) + return _c +} + +// Node provides a mock function with given fields: +func (_m *Client) Node() v1beta3.NodeClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Node") + } + + var r0 v1beta3.NodeClient + if rf, ok := ret.Get(0).(func() v1beta3.NodeClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta3.NodeClient) + } + } + + return r0 +} + +// Client_Node_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Node' +type Client_Node_Call struct { + *mock.Call +} + +// Node is a helper method to define mock.On call +func (_e *Client_Expecter) Node() *Client_Node_Call { + return &Client_Node_Call{Call: _e.mock.On("Node")} +} + +func (_c *Client_Node_Call) Run(run func()) *Client_Node_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_Node_Call) Return(_a0 v1beta3.NodeClient) *Client_Node_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_Node_Call) RunAndReturn(run func() v1beta3.NodeClient) *Client_Node_Call { + _c.Call.Return(run) + return _c +} + +// PrintMessage provides a mock function with given fields: _a0 +func (_m *Client) PrintMessage(_a0 interface{}) error { + ret := _m.Called(_a0) + + if len(ret) == 0 { + panic("no return value specified for PrintMessage") + } + + var r0 error + 
if rf, ok := ret.Get(0).(func(interface{}) error); ok { + r0 = rf(_a0) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// Client_PrintMessage_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PrintMessage' +type Client_PrintMessage_Call struct { + *mock.Call +} + +// PrintMessage is a helper method to define mock.On call +// - _a0 interface{} +func (_e *Client_Expecter) PrintMessage(_a0 interface{}) *Client_PrintMessage_Call { + return &Client_PrintMessage_Call{Call: _e.mock.On("PrintMessage", _a0)} +} + +func (_c *Client_PrintMessage_Call) Run(run func(_a0 interface{})) *Client_PrintMessage_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(interface{})) + }) + return _c +} + +func (_c *Client_PrintMessage_Call) Return(_a0 error) *Client_PrintMessage_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_PrintMessage_Call) RunAndReturn(run func(interface{}) error) *Client_PrintMessage_Call { + _c.Call.Return(run) + return _c +} + +// Query provides a mock function with given fields: +func (_m *Client) Query() v1beta3.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Query") + } + + var r0 v1beta3.QueryClient + if rf, ok := ret.Get(0).(func() v1beta3.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta3.QueryClient) + } + } + + return r0 +} + +// Client_Query_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Query' +type Client_Query_Call struct { + *mock.Call +} + +// Query is a helper method to define mock.On call +func (_e *Client_Expecter) Query() *Client_Query_Call { + return &Client_Query_Call{Call: _e.mock.On("Query")} +} + +func (_c *Client_Query_Call) Run(run func()) *Client_Query_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_Query_Call) Return(_a0 v1beta3.QueryClient) *Client_Query_Call { + 
_c.Call.Return(_a0) + return _c +} + +func (_c *Client_Query_Call) RunAndReturn(run func() v1beta3.QueryClient) *Client_Query_Call { + _c.Call.Return(run) + return _c +} + +// Tx provides a mock function with given fields: +func (_m *Client) Tx() v1beta3.TxClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Tx") + } + + var r0 v1beta3.TxClient + if rf, ok := ret.Get(0).(func() v1beta3.TxClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(v1beta3.TxClient) + } + } + + return r0 +} + +// Client_Tx_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Tx' +type Client_Tx_Call struct { + *mock.Call +} + +// Tx is a helper method to define mock.On call +func (_e *Client_Expecter) Tx() *Client_Tx_Call { + return &Client_Tx_Call{Call: _e.mock.On("Tx")} +} + +func (_c *Client_Tx_Call) Run(run func()) *Client_Tx_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *Client_Tx_Call) Return(_a0 v1beta3.TxClient) *Client_Tx_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *Client_Tx_Call) RunAndReturn(run func() v1beta3.TxClient) *Client_Tx_Call { + _c.Call.Return(run) + return _c +} + +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { + mock.TestingT + Cleanup(func()) +}) *Client { + mock := &Client{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/mocks/node_client.go b/go/node/client/v1beta3/mocks/node_client.go new file mode 100644 index 00000000..e567347b --- /dev/null +++ b/go/node/client/v1beta3/mocks/node_client.go @@ -0,0 +1,95 @@ +// Code generated by mockery v2.42.0. DO NOT EDIT. 
+ +package mocks + +import ( + context "context" + + mock "github.com/stretchr/testify/mock" + coretypes "github.com/tendermint/tendermint/rpc/core/types" +) + +// NodeClient is an autogenerated mock type for the NodeClient type +type NodeClient struct { + mock.Mock +} + +type NodeClient_Expecter struct { + mock *mock.Mock +} + +func (_m *NodeClient) EXPECT() *NodeClient_Expecter { + return &NodeClient_Expecter{mock: &_m.Mock} +} + +// SyncInfo provides a mock function with given fields: ctx +func (_m *NodeClient) SyncInfo(ctx context.Context) (*coretypes.SyncInfo, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for SyncInfo") + } + + var r0 *coretypes.SyncInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*coretypes.SyncInfo, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *coretypes.SyncInfo); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*coretypes.SyncInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// NodeClient_SyncInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'SyncInfo' +type NodeClient_SyncInfo_Call struct { + *mock.Call +} + +// SyncInfo is a helper method to define mock.On call +// - ctx context.Context +func (_e *NodeClient_Expecter) SyncInfo(ctx interface{}) *NodeClient_SyncInfo_Call { + return &NodeClient_SyncInfo_Call{Call: _e.mock.On("SyncInfo", ctx)} +} + +func (_c *NodeClient_SyncInfo_Call) Run(run func(ctx context.Context)) *NodeClient_SyncInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *NodeClient_SyncInfo_Call) Return(_a0 *coretypes.SyncInfo, _a1 error) *NodeClient_SyncInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *NodeClient_SyncInfo_Call) RunAndReturn(run func(context.Context) 
(*coretypes.SyncInfo, error)) *NodeClient_SyncInfo_Call { + _c.Call.Return(run) + return _c +} + +// NewNodeClient creates a new instance of NodeClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewNodeClient(t interface { + mock.TestingT + Cleanup(func()) +}) *NodeClient { + mock := &NodeClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/mocks/query_client.go b/go/node/client/v1beta3/mocks/query_client.go new file mode 100644 index 00000000..ca8bec86 --- /dev/null +++ b/go/node/client/v1beta3/mocks/query_client.go @@ -0,0 +1,1866 @@ +// Code generated by mockery v2.42.0. DO NOT EDIT. + +package mocks + +import ( + authz "github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + + certv1beta3 "github.com/akash-network/akash-api/go/node/cert/v1beta3" + + client "github.com/cosmos/cosmos-sdk/client" + + context "context" + + deploymentv1beta4 "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + + distributiontypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + + evidencetypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + + feegrant "github.com/cosmos/cosmos-sdk/x/feegrant" + + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + + grpc "google.golang.org/grpc" + + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + + mock "github.com/stretchr/testify/mock" + + proposal "github.com/cosmos/cosmos-sdk/x/params/types/proposal" + + providerv1beta4 "github.com/akash-network/akash-api/go/node/provider/v1beta4" + + slashingtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + + stakingtypes "github.com/cosmos/cosmos-sdk/x/staking/types" + + types "github.com/cosmos/cosmos-sdk/x/auth/types" + + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + v1beta4 
"github.com/akash-network/akash-api/go/node/audit/v1beta4" + + v1beta5 "github.com/akash-network/akash-api/go/node/market/v1beta5" +) + +// QueryClient is an autogenerated mock type for the QueryClient type +type QueryClient struct { + mock.Mock +} + +type QueryClient_Expecter struct { + mock *mock.Mock +} + +func (_m *QueryClient) EXPECT() *QueryClient_Expecter { + return &QueryClient_Expecter{mock: &_m.Mock} +} + +// AllProvidersAttributes provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) AllProvidersAttributes(ctx context.Context, in *v1beta4.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for AllProvidersAttributes") + } + + var r0 *v1beta4.QueryProvidersResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryAllProvidersAttributesRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryAllProvidersAttributesRequest, ...grpc.CallOption) *v1beta4.QueryProvidersResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta4.QueryProvidersResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryAllProvidersAttributesRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_AllProvidersAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AllProvidersAttributes' +type QueryClient_AllProvidersAttributes_Call struct { + *mock.Call +} + +// AllProvidersAttributes is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta4.QueryAllProvidersAttributesRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) AllProvidersAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_AllProvidersAttributes_Call { + return &QueryClient_AllProvidersAttributes_Call{Call: _e.mock.On("AllProvidersAttributes", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_AllProvidersAttributes_Call) Run(run func(ctx context.Context, in *v1beta4.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption)) *QueryClient_AllProvidersAttributes_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta4.QueryAllProvidersAttributesRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_AllProvidersAttributes_Call) Return(_a0 *v1beta4.QueryProvidersResponse, _a1 error) *QueryClient_AllProvidersAttributes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_AllProvidersAttributes_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryAllProvidersAttributesRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)) *QueryClient_AllProvidersAttributes_Call { + _c.Call.Return(run) + return _c +} + +// AuditorAttributes provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) AuditorAttributes(ctx context.Context, in *v1beta4.QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for AuditorAttributes") + } + + var r0 *v1beta4.QueryProvidersResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryAuditorAttributesRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryAuditorAttributesRequest, ...grpc.CallOption) *v1beta4.QueryProvidersResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta4.QueryProvidersResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryAuditorAttributesRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_AuditorAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AuditorAttributes' +type QueryClient_AuditorAttributes_Call struct { + *mock.Call +} + +// AuditorAttributes is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta4.QueryAuditorAttributesRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) AuditorAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_AuditorAttributes_Call { + return &QueryClient_AuditorAttributes_Call{Call: _e.mock.On("AuditorAttributes", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_AuditorAttributes_Call) Run(run func(ctx context.Context, in *v1beta4.QueryAuditorAttributesRequest, opts ...grpc.CallOption)) *QueryClient_AuditorAttributes_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta4.QueryAuditorAttributesRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_AuditorAttributes_Call) Return(_a0 *v1beta4.QueryProvidersResponse, _a1 error) *QueryClient_AuditorAttributes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_AuditorAttributes_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryAuditorAttributesRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)) *QueryClient_AuditorAttributes_Call { + _c.Call.Return(run) + return _c +} + +// Auth provides a mock function with given fields: +func (_m *QueryClient) Auth() types.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Auth") + } + + var r0 types.QueryClient + if rf, ok := ret.Get(0).(func() types.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(types.QueryClient) + } + } + + return r0 +} + +// QueryClient_Auth_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Auth' +type QueryClient_Auth_Call struct { + *mock.Call +} + +// Auth is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Auth() *QueryClient_Auth_Call { + return &QueryClient_Auth_Call{Call: _e.mock.On("Auth")} +} + +func (_c *QueryClient_Auth_Call) Run(run func()) *QueryClient_Auth_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Auth_Call) Return(_a0 types.QueryClient) *QueryClient_Auth_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Auth_Call) RunAndReturn(run func() types.QueryClient) *QueryClient_Auth_Call { + _c.Call.Return(run) + return _c +} + +// Authz provides a mock function with given fields: +func (_m *QueryClient) Authz() authz.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Authz") + } + + var r0 authz.QueryClient + if rf, ok := ret.Get(0).(func() authz.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(authz.QueryClient) + } + } + + return r0 +} + +// QueryClient_Authz_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Authz' +type QueryClient_Authz_Call struct { + *mock.Call +} + +// Authz is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Authz() *QueryClient_Authz_Call { + return &QueryClient_Authz_Call{Call: _e.mock.On("Authz")} +} + +func (_c *QueryClient_Authz_Call) Run(run func()) *QueryClient_Authz_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Authz_Call) Return(_a0 authz.QueryClient) *QueryClient_Authz_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Authz_Call) RunAndReturn(run func() authz.QueryClient) *QueryClient_Authz_Call { + _c.Call.Return(run) + return _c +} + +// Bank provides a mock function with given fields: +func (_m *QueryClient) Bank() banktypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Bank") + } + + var r0 banktypes.QueryClient + if rf, ok := ret.Get(0).(func() banktypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(banktypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Bank_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bank' +type QueryClient_Bank_Call struct { + *mock.Call +} + +// Bank is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Bank() *QueryClient_Bank_Call { + return &QueryClient_Bank_Call{Call: _e.mock.On("Bank")} +} + +func (_c *QueryClient_Bank_Call) Run(run func()) *QueryClient_Bank_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Bank_Call) Return(_a0 banktypes.QueryClient) *QueryClient_Bank_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Bank_Call) RunAndReturn(run func() banktypes.QueryClient) *QueryClient_Bank_Call { + 
_c.Call.Return(run) + return _c +} + +// Bid provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Bid(ctx context.Context, in *v1beta5.QueryBidRequest, opts ...grpc.CallOption) (*v1beta5.QueryBidResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Bid") + } + + var r0 *v1beta5.QueryBidResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryBidRequest, ...grpc.CallOption) (*v1beta5.QueryBidResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryBidRequest, ...grpc.CallOption) *v1beta5.QueryBidResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta5.QueryBidResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta5.QueryBidRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Bid_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bid' +type QueryClient_Bid_Call struct { + *mock.Call +} + +// Bid is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta5.QueryBidRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Bid(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Bid_Call { + return &QueryClient_Bid_Call{Call: _e.mock.On("Bid", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Bid_Call) Run(run func(ctx context.Context, in *v1beta5.QueryBidRequest, opts ...grpc.CallOption)) *QueryClient_Bid_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta5.QueryBidRequest), variadicArgs...) + }) + return _c +} + +func (_c *QueryClient_Bid_Call) Return(_a0 *v1beta5.QueryBidResponse, _a1 error) *QueryClient_Bid_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Bid_Call) RunAndReturn(run func(context.Context, *v1beta5.QueryBidRequest, ...grpc.CallOption) (*v1beta5.QueryBidResponse, error)) *QueryClient_Bid_Call { + _c.Call.Return(run) + return _c +} + +// Bids provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Bids(ctx context.Context, in *v1beta5.QueryBidsRequest, opts ...grpc.CallOption) (*v1beta5.QueryBidsResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Bids") + } + + var r0 *v1beta5.QueryBidsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryBidsRequest, ...grpc.CallOption) (*v1beta5.QueryBidsResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryBidsRequest, ...grpc.CallOption) *v1beta5.QueryBidsResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta5.QueryBidsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta5.QueryBidsRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Bids_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Bids' +type QueryClient_Bids_Call struct { + *mock.Call +} + +// Bids is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta5.QueryBidsRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Bids(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Bids_Call { + return &QueryClient_Bids_Call{Call: _e.mock.On("Bids", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Bids_Call) Run(run func(ctx context.Context, in *v1beta5.QueryBidsRequest, opts ...grpc.CallOption)) *QueryClient_Bids_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta5.QueryBidsRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Bids_Call) Return(_a0 *v1beta5.QueryBidsResponse, _a1 error) *QueryClient_Bids_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Bids_Call) RunAndReturn(run func(context.Context, *v1beta5.QueryBidsRequest, ...grpc.CallOption) (*v1beta5.QueryBidsResponse, error)) *QueryClient_Bids_Call { + _c.Call.Return(run) + return _c +} + +// Certificates provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Certificates(ctx context.Context, in *certv1beta3.QueryCertificatesRequest, opts ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Certificates") + } + + var r0 *certv1beta3.QueryCertificatesResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) *certv1beta3.QueryCertificatesResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*certv1beta3.QueryCertificatesResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Certificates_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Certificates' +type QueryClient_Certificates_Call struct { + *mock.Call +} + +// Certificates is a helper method to define mock.On call +// - ctx context.Context +// - in *certv1beta3.QueryCertificatesRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Certificates(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Certificates_Call { + return &QueryClient_Certificates_Call{Call: _e.mock.On("Certificates", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Certificates_Call) Run(run func(ctx context.Context, in *certv1beta3.QueryCertificatesRequest, opts ...grpc.CallOption)) *QueryClient_Certificates_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*certv1beta3.QueryCertificatesRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Certificates_Call) Return(_a0 *certv1beta3.QueryCertificatesResponse, _a1 error) *QueryClient_Certificates_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Certificates_Call) RunAndReturn(run func(context.Context, *certv1beta3.QueryCertificatesRequest, ...grpc.CallOption) (*certv1beta3.QueryCertificatesResponse, error)) *QueryClient_Certificates_Call { + _c.Call.Return(run) + return _c +} + +// ClientContext provides a mock function with given fields: +func (_m *QueryClient) ClientContext() client.Context { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for ClientContext") + } + + var r0 client.Context + if rf, ok := ret.Get(0).(func() client.Context); ok { + r0 = rf() + } else { + r0 = ret.Get(0).(client.Context) + } + + return r0 +} + +// QueryClient_ClientContext_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ClientContext' +type QueryClient_ClientContext_Call struct { + *mock.Call +} + +// ClientContext is a helper method to define mock.On call +func (_e *QueryClient_Expecter) ClientContext() *QueryClient_ClientContext_Call { + return &QueryClient_ClientContext_Call{Call: _e.mock.On("ClientContext")} +} + +func (_c *QueryClient_ClientContext_Call) Run(run func()) *QueryClient_ClientContext_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_ClientContext_Call) Return(_a0 client.Context) *QueryClient_ClientContext_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_ClientContext_Call) RunAndReturn(run func() client.Context) *QueryClient_ClientContext_Call { + _c.Call.Return(run) + return _c +} + +// Deployment provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Deployment(ctx context.Context, in *deploymentv1beta4.QueryDeploymentRequest, opts ...grpc.CallOption) (*deploymentv1beta4.QueryDeploymentResponse, error) { + _va 
:= make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Deployment") + } + + var r0 *deploymentv1beta4.QueryDeploymentResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta4.QueryDeploymentRequest, ...grpc.CallOption) (*deploymentv1beta4.QueryDeploymentResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta4.QueryDeploymentRequest, ...grpc.CallOption) *deploymentv1beta4.QueryDeploymentResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*deploymentv1beta4.QueryDeploymentResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta4.QueryDeploymentRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Deployment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deployment' +type QueryClient_Deployment_Call struct { + *mock.Call +} + +// Deployment is a helper method to define mock.On call +// - ctx context.Context +// - in *deploymentv1beta4.QueryDeploymentRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Deployment(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Deployment_Call { + return &QueryClient_Deployment_Call{Call: _e.mock.On("Deployment", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Deployment_Call) Run(run func(ctx context.Context, in *deploymentv1beta4.QueryDeploymentRequest, opts ...grpc.CallOption)) *QueryClient_Deployment_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] 
= a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*deploymentv1beta4.QueryDeploymentRequest), variadicArgs...) + }) + return _c +} + +func (_c *QueryClient_Deployment_Call) Return(_a0 *deploymentv1beta4.QueryDeploymentResponse, _a1 error) *QueryClient_Deployment_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Deployment_Call) RunAndReturn(run func(context.Context, *deploymentv1beta4.QueryDeploymentRequest, ...grpc.CallOption) (*deploymentv1beta4.QueryDeploymentResponse, error)) *QueryClient_Deployment_Call { + _c.Call.Return(run) + return _c +} + +// Deployments provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Deployments(ctx context.Context, in *deploymentv1beta4.QueryDeploymentsRequest, opts ...grpc.CallOption) (*deploymentv1beta4.QueryDeploymentsResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Deployments") + } + + var r0 *deploymentv1beta4.QueryDeploymentsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta4.QueryDeploymentsRequest, ...grpc.CallOption) (*deploymentv1beta4.QueryDeploymentsResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta4.QueryDeploymentsRequest, ...grpc.CallOption) *deploymentv1beta4.QueryDeploymentsResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*deploymentv1beta4.QueryDeploymentsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta4.QueryDeploymentsRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Deployments_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Deployments' +type QueryClient_Deployments_Call struct { + *mock.Call +} + +// Deployments is a helper method to define mock.On call +// - ctx context.Context +// - in *deploymentv1beta4.QueryDeploymentsRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Deployments(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Deployments_Call { + return &QueryClient_Deployments_Call{Call: _e.mock.On("Deployments", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Deployments_Call) Run(run func(ctx context.Context, in *deploymentv1beta4.QueryDeploymentsRequest, opts ...grpc.CallOption)) *QueryClient_Deployments_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*deploymentv1beta4.QueryDeploymentsRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Deployments_Call) Return(_a0 *deploymentv1beta4.QueryDeploymentsResponse, _a1 error) *QueryClient_Deployments_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Deployments_Call) RunAndReturn(run func(context.Context, *deploymentv1beta4.QueryDeploymentsRequest, ...grpc.CallOption) (*deploymentv1beta4.QueryDeploymentsResponse, error)) *QueryClient_Deployments_Call { + _c.Call.Return(run) + return _c +} + +// Distribution provides a mock function with given fields: +func (_m *QueryClient) Distribution() distributiontypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Distribution") + } + + var r0 distributiontypes.QueryClient + if rf, ok := ret.Get(0).(func() distributiontypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(distributiontypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Distribution_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Distribution' +type QueryClient_Distribution_Call struct { + *mock.Call +} + +// Distribution is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Distribution() *QueryClient_Distribution_Call { + return &QueryClient_Distribution_Call{Call: _e.mock.On("Distribution")} +} + +func (_c *QueryClient_Distribution_Call) Run(run func()) *QueryClient_Distribution_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Distribution_Call) Return(_a0 distributiontypes.QueryClient) *QueryClient_Distribution_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Distribution_Call) RunAndReturn(run func() distributiontypes.QueryClient) *QueryClient_Distribution_Call { + _c.Call.Return(run) + return _c +} + +// Evidence provides a mock function with given fields: +func (_m *QueryClient) Evidence() evidencetypes.QueryClient { + ret := _m.Called() + + if 
len(ret) == 0 { + panic("no return value specified for Evidence") + } + + var r0 evidencetypes.QueryClient + if rf, ok := ret.Get(0).(func() evidencetypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(evidencetypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Evidence_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Evidence' +type QueryClient_Evidence_Call struct { + *mock.Call +} + +// Evidence is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Evidence() *QueryClient_Evidence_Call { + return &QueryClient_Evidence_Call{Call: _e.mock.On("Evidence")} +} + +func (_c *QueryClient_Evidence_Call) Run(run func()) *QueryClient_Evidence_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Evidence_Call) Return(_a0 evidencetypes.QueryClient) *QueryClient_Evidence_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Evidence_Call) RunAndReturn(run func() evidencetypes.QueryClient) *QueryClient_Evidence_Call { + _c.Call.Return(run) + return _c +} + +// Feegrant provides a mock function with given fields: +func (_m *QueryClient) Feegrant() feegrant.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Feegrant") + } + + var r0 feegrant.QueryClient + if rf, ok := ret.Get(0).(func() feegrant.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(feegrant.QueryClient) + } + } + + return r0 +} + +// QueryClient_Feegrant_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Feegrant' +type QueryClient_Feegrant_Call struct { + *mock.Call +} + +// Feegrant is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Feegrant() *QueryClient_Feegrant_Call { + return &QueryClient_Feegrant_Call{Call: _e.mock.On("Feegrant")} +} + +func (_c *QueryClient_Feegrant_Call) Run(run func()) 
*QueryClient_Feegrant_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Feegrant_Call) Return(_a0 feegrant.QueryClient) *QueryClient_Feegrant_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Feegrant_Call) RunAndReturn(run func() feegrant.QueryClient) *QueryClient_Feegrant_Call { + _c.Call.Return(run) + return _c +} + +// Gov provides a mock function with given fields: +func (_m *QueryClient) Gov() govtypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Gov") + } + + var r0 govtypes.QueryClient + if rf, ok := ret.Get(0).(func() govtypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(govtypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Gov_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Gov' +type QueryClient_Gov_Call struct { + *mock.Call +} + +// Gov is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Gov() *QueryClient_Gov_Call { + return &QueryClient_Gov_Call{Call: _e.mock.On("Gov")} +} + +func (_c *QueryClient_Gov_Call) Run(run func()) *QueryClient_Gov_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Gov_Call) Return(_a0 govtypes.QueryClient) *QueryClient_Gov_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Gov_Call) RunAndReturn(run func() govtypes.QueryClient) *QueryClient_Gov_Call { + _c.Call.Return(run) + return _c +} + +// Group provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Group(ctx context.Context, in *deploymentv1beta4.QueryGroupRequest, opts ...grpc.CallOption) (*deploymentv1beta4.QueryGroupResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Group") + } + + var r0 *deploymentv1beta4.QueryGroupResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta4.QueryGroupRequest, ...grpc.CallOption) (*deploymentv1beta4.QueryGroupResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *deploymentv1beta4.QueryGroupRequest, ...grpc.CallOption) *deploymentv1beta4.QueryGroupResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*deploymentv1beta4.QueryGroupResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *deploymentv1beta4.QueryGroupRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Group_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Group' +type QueryClient_Group_Call struct { + *mock.Call +} + +// Group is a helper method to define mock.On call +// - ctx context.Context +// - in *deploymentv1beta4.QueryGroupRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Group(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Group_Call { + return &QueryClient_Group_Call{Call: _e.mock.On("Group", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Group_Call) Run(run func(ctx context.Context, in *deploymentv1beta4.QueryGroupRequest, opts ...grpc.CallOption)) *QueryClient_Group_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*deploymentv1beta4.QueryGroupRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Group_Call) Return(_a0 *deploymentv1beta4.QueryGroupResponse, _a1 error) *QueryClient_Group_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Group_Call) RunAndReturn(run func(context.Context, *deploymentv1beta4.QueryGroupRequest, ...grpc.CallOption) (*deploymentv1beta4.QueryGroupResponse, error)) *QueryClient_Group_Call { + _c.Call.Return(run) + return _c +} + +// Lease provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Lease(ctx context.Context, in *v1beta5.QueryLeaseRequest, opts ...grpc.CallOption) (*v1beta5.QueryLeaseResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Lease") + } + + var r0 *v1beta5.QueryLeaseResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryLeaseRequest, ...grpc.CallOption) (*v1beta5.QueryLeaseResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryLeaseRequest, ...grpc.CallOption) *v1beta5.QueryLeaseResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta5.QueryLeaseResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta5.QueryLeaseRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Lease_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Lease' +type QueryClient_Lease_Call struct { + *mock.Call +} + +// Lease is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta5.QueryLeaseRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Lease(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Lease_Call { + return &QueryClient_Lease_Call{Call: _e.mock.On("Lease", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Lease_Call) Run(run func(ctx context.Context, in *v1beta5.QueryLeaseRequest, opts ...grpc.CallOption)) *QueryClient_Lease_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta5.QueryLeaseRequest), variadicArgs...) + }) + return _c +} + +func (_c *QueryClient_Lease_Call) Return(_a0 *v1beta5.QueryLeaseResponse, _a1 error) *QueryClient_Lease_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Lease_Call) RunAndReturn(run func(context.Context, *v1beta5.QueryLeaseRequest, ...grpc.CallOption) (*v1beta5.QueryLeaseResponse, error)) *QueryClient_Lease_Call { + _c.Call.Return(run) + return _c +} + +// Leases provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Leases(ctx context.Context, in *v1beta5.QueryLeasesRequest, opts ...grpc.CallOption) (*v1beta5.QueryLeasesResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Leases") + } + + var r0 *v1beta5.QueryLeasesResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryLeasesRequest, ...grpc.CallOption) (*v1beta5.QueryLeasesResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryLeasesRequest, ...grpc.CallOption) *v1beta5.QueryLeasesResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta5.QueryLeasesResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta5.QueryLeasesRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Leases_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Leases' +type QueryClient_Leases_Call struct { + *mock.Call +} + +// Leases is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta5.QueryLeasesRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Leases(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Leases_Call { + return &QueryClient_Leases_Call{Call: _e.mock.On("Leases", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Leases_Call) Run(run func(ctx context.Context, in *v1beta5.QueryLeasesRequest, opts ...grpc.CallOption)) *QueryClient_Leases_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta5.QueryLeasesRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Leases_Call) Return(_a0 *v1beta5.QueryLeasesResponse, _a1 error) *QueryClient_Leases_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Leases_Call) RunAndReturn(run func(context.Context, *v1beta5.QueryLeasesRequest, ...grpc.CallOption) (*v1beta5.QueryLeasesResponse, error)) *QueryClient_Leases_Call { + _c.Call.Return(run) + return _c +} + +// Mint provides a mock function with given fields: +func (_m *QueryClient) Mint() minttypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Mint") + } + + var r0 minttypes.QueryClient + if rf, ok := ret.Get(0).(func() minttypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(minttypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Mint_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mint' +type QueryClient_Mint_Call struct { + *mock.Call +} + +// Mint is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Mint() *QueryClient_Mint_Call { + return &QueryClient_Mint_Call{Call: _e.mock.On("Mint")} +} + +func (_c *QueryClient_Mint_Call) Run(run func()) *QueryClient_Mint_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Mint_Call) Return(_a0 minttypes.QueryClient) *QueryClient_Mint_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Mint_Call) RunAndReturn(run func() minttypes.QueryClient) *QueryClient_Mint_Call { + _c.Call.Return(run) + return _c +} + +// Order provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Order(ctx context.Context, in *v1beta5.QueryOrderRequest, opts ...grpc.CallOption) (*v1beta5.QueryOrderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) 
+ ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Order") + } + + var r0 *v1beta5.QueryOrderResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryOrderRequest, ...grpc.CallOption) (*v1beta5.QueryOrderResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryOrderRequest, ...grpc.CallOption) *v1beta5.QueryOrderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta5.QueryOrderResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta5.QueryOrderRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Order_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Order' +type QueryClient_Order_Call struct { + *mock.Call +} + +// Order is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta5.QueryOrderRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Order(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Order_Call { + return &QueryClient_Order_Call{Call: _e.mock.On("Order", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Order_Call) Run(run func(ctx context.Context, in *v1beta5.QueryOrderRequest, opts ...grpc.CallOption)) *QueryClient_Order_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta5.QueryOrderRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Order_Call) Return(_a0 *v1beta5.QueryOrderResponse, _a1 error) *QueryClient_Order_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Order_Call) RunAndReturn(run func(context.Context, *v1beta5.QueryOrderRequest, ...grpc.CallOption) (*v1beta5.QueryOrderResponse, error)) *QueryClient_Order_Call { + _c.Call.Return(run) + return _c +} + +// Orders provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Orders(ctx context.Context, in *v1beta5.QueryOrdersRequest, opts ...grpc.CallOption) (*v1beta5.QueryOrdersResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Orders") + } + + var r0 *v1beta5.QueryOrdersResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryOrdersRequest, ...grpc.CallOption) (*v1beta5.QueryOrdersResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta5.QueryOrdersRequest, ...grpc.CallOption) *v1beta5.QueryOrdersResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta5.QueryOrdersResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta5.QueryOrdersRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Orders_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Orders' +type QueryClient_Orders_Call struct { + *mock.Call +} + +// Orders is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta5.QueryOrdersRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Orders(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Orders_Call { + return &QueryClient_Orders_Call{Call: _e.mock.On("Orders", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Orders_Call) Run(run func(ctx context.Context, in *v1beta5.QueryOrdersRequest, opts ...grpc.CallOption)) *QueryClient_Orders_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta5.QueryOrdersRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Orders_Call) Return(_a0 *v1beta5.QueryOrdersResponse, _a1 error) *QueryClient_Orders_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Orders_Call) RunAndReturn(run func(context.Context, *v1beta5.QueryOrdersRequest, ...grpc.CallOption) (*v1beta5.QueryOrdersResponse, error)) *QueryClient_Orders_Call { + _c.Call.Return(run) + return _c +} + +// Params provides a mock function with given fields: +func (_m *QueryClient) Params() proposal.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Params") + } + + var r0 proposal.QueryClient + if rf, ok := ret.Get(0).(func() proposal.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(proposal.QueryClient) + } + } + + return r0 +} + +// QueryClient_Params_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Params' +type QueryClient_Params_Call struct { + *mock.Call +} + +// Params is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Params() *QueryClient_Params_Call { + return &QueryClient_Params_Call{Call: _e.mock.On("Params")} +} + +func (_c *QueryClient_Params_Call) Run(run func()) *QueryClient_Params_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Params_Call) Return(_a0 proposal.QueryClient) *QueryClient_Params_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Params_Call) RunAndReturn(run func() proposal.QueryClient) *QueryClient_Params_Call { + _c.Call.Return(run) + return _c +} + +// Provider provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Provider(ctx context.Context, in *providerv1beta4.QueryProviderRequest, opts ...grpc.CallOption) (*providerv1beta4.QueryProviderResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = 
append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Provider") + } + + var r0 *providerv1beta4.QueryProviderResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta4.QueryProviderRequest, ...grpc.CallOption) (*providerv1beta4.QueryProviderResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta4.QueryProviderRequest, ...grpc.CallOption) *providerv1beta4.QueryProviderResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*providerv1beta4.QueryProviderResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta4.QueryProviderRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Provider_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Provider' +type QueryClient_Provider_Call struct { + *mock.Call +} + +// Provider is a helper method to define mock.On call +// - ctx context.Context +// - in *providerv1beta4.QueryProviderRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Provider(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Provider_Call { + return &QueryClient_Provider_Call{Call: _e.mock.On("Provider", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Provider_Call) Run(run func(ctx context.Context, in *providerv1beta4.QueryProviderRequest, opts ...grpc.CallOption)) *QueryClient_Provider_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*providerv1beta4.QueryProviderRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Provider_Call) Return(_a0 *providerv1beta4.QueryProviderResponse, _a1 error) *QueryClient_Provider_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Provider_Call) RunAndReturn(run func(context.Context, *providerv1beta4.QueryProviderRequest, ...grpc.CallOption) (*providerv1beta4.QueryProviderResponse, error)) *QueryClient_Provider_Call { + _c.Call.Return(run) + return _c +} + +// ProviderAttributes provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) ProviderAttributes(ctx context.Context, in *v1beta4.QueryProviderAttributesRequest, opts ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ProviderAttributes") + } + + var r0 *v1beta4.QueryProvidersResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryProviderAttributesRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryProviderAttributesRequest, ...grpc.CallOption) *v1beta4.QueryProvidersResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta4.QueryProvidersResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryProviderAttributesRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_ProviderAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProviderAttributes' +type QueryClient_ProviderAttributes_Call struct { + *mock.Call +} + +// ProviderAttributes is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta4.QueryProviderAttributesRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) ProviderAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_ProviderAttributes_Call { + return &QueryClient_ProviderAttributes_Call{Call: _e.mock.On("ProviderAttributes", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_ProviderAttributes_Call) Run(run func(ctx context.Context, in *v1beta4.QueryProviderAttributesRequest, opts ...grpc.CallOption)) *QueryClient_ProviderAttributes_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta4.QueryProviderAttributesRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_ProviderAttributes_Call) Return(_a0 *v1beta4.QueryProvidersResponse, _a1 error) *QueryClient_ProviderAttributes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_ProviderAttributes_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryProviderAttributesRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)) *QueryClient_ProviderAttributes_Call { + _c.Call.Return(run) + return _c +} + +// ProviderAuditorAttributes provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) ProviderAuditorAttributes(ctx context.Context, in *v1beta4.QueryProviderAuditorRequest, opts ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for ProviderAuditorAttributes") + } + + var r0 *v1beta4.QueryProvidersResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryProviderAuditorRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *v1beta4.QueryProviderAuditorRequest, ...grpc.CallOption) *v1beta4.QueryProvidersResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*v1beta4.QueryProvidersResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *v1beta4.QueryProviderAuditorRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_ProviderAuditorAttributes_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ProviderAuditorAttributes' +type QueryClient_ProviderAuditorAttributes_Call struct { + *mock.Call +} + +// ProviderAuditorAttributes is a helper method to define mock.On call +// - ctx context.Context +// - in *v1beta4.QueryProviderAuditorRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) ProviderAuditorAttributes(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_ProviderAuditorAttributes_Call { + return &QueryClient_ProviderAuditorAttributes_Call{Call: _e.mock.On("ProviderAuditorAttributes", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_ProviderAuditorAttributes_Call) Run(run func(ctx context.Context, in *v1beta4.QueryProviderAuditorRequest, opts ...grpc.CallOption)) *QueryClient_ProviderAuditorAttributes_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*v1beta4.QueryProviderAuditorRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_ProviderAuditorAttributes_Call) Return(_a0 *v1beta4.QueryProvidersResponse, _a1 error) *QueryClient_ProviderAuditorAttributes_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_ProviderAuditorAttributes_Call) RunAndReturn(run func(context.Context, *v1beta4.QueryProviderAuditorRequest, ...grpc.CallOption) (*v1beta4.QueryProvidersResponse, error)) *QueryClient_ProviderAuditorAttributes_Call { + _c.Call.Return(run) + return _c +} + +// Providers provides a mock function with given fields: ctx, in, opts +func (_m *QueryClient) Providers(ctx context.Context, in *providerv1beta4.QueryProvidersRequest, opts ...grpc.CallOption) (*providerv1beta4.QueryProvidersResponse, error) { + _va := make([]interface{}, len(opts)) + for _i := range opts { + _va[_i] = opts[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, in) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Providers") + } + + var r0 *providerv1beta4.QueryProvidersResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta4.QueryProvidersRequest, ...grpc.CallOption) (*providerv1beta4.QueryProvidersResponse, error)); ok { + return rf(ctx, in, opts...) + } + if rf, ok := ret.Get(0).(func(context.Context, *providerv1beta4.QueryProvidersRequest, ...grpc.CallOption) *providerv1beta4.QueryProvidersResponse); ok { + r0 = rf(ctx, in, opts...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*providerv1beta4.QueryProvidersResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, *providerv1beta4.QueryProvidersRequest, ...grpc.CallOption) error); ok { + r1 = rf(ctx, in, opts...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// QueryClient_Providers_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Providers' +type QueryClient_Providers_Call struct { + *mock.Call +} + +// Providers is a helper method to define mock.On call +// - ctx context.Context +// - in *providerv1beta4.QueryProvidersRequest +// - opts ...grpc.CallOption +func (_e *QueryClient_Expecter) Providers(ctx interface{}, in interface{}, opts ...interface{}) *QueryClient_Providers_Call { + return &QueryClient_Providers_Call{Call: _e.mock.On("Providers", + append([]interface{}{ctx, in}, opts...)...)} +} + +func (_c *QueryClient_Providers_Call) Run(run func(ctx context.Context, in *providerv1beta4.QueryProvidersRequest, opts ...grpc.CallOption)) *QueryClient_Providers_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]grpc.CallOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(grpc.CallOption) + } + } + run(args[0].(context.Context), args[1].(*providerv1beta4.QueryProvidersRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *QueryClient_Providers_Call) Return(_a0 *providerv1beta4.QueryProvidersResponse, _a1 error) *QueryClient_Providers_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *QueryClient_Providers_Call) RunAndReturn(run func(context.Context, *providerv1beta4.QueryProvidersRequest, ...grpc.CallOption) (*providerv1beta4.QueryProvidersResponse, error)) *QueryClient_Providers_Call { + _c.Call.Return(run) + return _c +} + +// Slashing provides a mock function with given fields: +func (_m *QueryClient) Slashing() slashingtypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Slashing") + } + + var r0 slashingtypes.QueryClient + if rf, ok := ret.Get(0).(func() slashingtypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(slashingtypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Slashing_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Slashing' +type QueryClient_Slashing_Call struct { + *mock.Call +} + +// Slashing is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Slashing() *QueryClient_Slashing_Call { + return &QueryClient_Slashing_Call{Call: _e.mock.On("Slashing")} +} + +func (_c *QueryClient_Slashing_Call) Run(run func()) *QueryClient_Slashing_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Slashing_Call) Return(_a0 slashingtypes.QueryClient) *QueryClient_Slashing_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Slashing_Call) RunAndReturn(run func() slashingtypes.QueryClient) *QueryClient_Slashing_Call { + _c.Call.Return(run) + return _c +} + +// Staking provides a mock function with given fields: +func (_m *QueryClient) Staking() stakingtypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Staking") + } + + var r0 stakingtypes.QueryClient + if rf, 
ok := ret.Get(0).(func() stakingtypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(stakingtypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Staking_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Staking' +type QueryClient_Staking_Call struct { + *mock.Call +} + +// Staking is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Staking() *QueryClient_Staking_Call { + return &QueryClient_Staking_Call{Call: _e.mock.On("Staking")} +} + +func (_c *QueryClient_Staking_Call) Run(run func()) *QueryClient_Staking_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *QueryClient_Staking_Call) Return(_a0 stakingtypes.QueryClient) *QueryClient_Staking_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Staking_Call) RunAndReturn(run func() stakingtypes.QueryClient) *QueryClient_Staking_Call { + _c.Call.Return(run) + return _c +} + +// Upgrade provides a mock function with given fields: +func (_m *QueryClient) Upgrade() upgradetypes.QueryClient { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for Upgrade") + } + + var r0 upgradetypes.QueryClient + if rf, ok := ret.Get(0).(func() upgradetypes.QueryClient); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(upgradetypes.QueryClient) + } + } + + return r0 +} + +// QueryClient_Upgrade_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Upgrade' +type QueryClient_Upgrade_Call struct { + *mock.Call +} + +// Upgrade is a helper method to define mock.On call +func (_e *QueryClient_Expecter) Upgrade() *QueryClient_Upgrade_Call { + return &QueryClient_Upgrade_Call{Call: _e.mock.On("Upgrade")} +} + +func (_c *QueryClient_Upgrade_Call) Run(run func()) *QueryClient_Upgrade_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c 
*QueryClient_Upgrade_Call) Return(_a0 upgradetypes.QueryClient) *QueryClient_Upgrade_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *QueryClient_Upgrade_Call) RunAndReturn(run func() upgradetypes.QueryClient) *QueryClient_Upgrade_Call { + _c.Call.Return(run) + return _c +} + +// NewQueryClient creates a new instance of QueryClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewQueryClient(t interface { + mock.TestingT + Cleanup(func()) +}) *QueryClient { + mock := &QueryClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/mocks/tx_client.go b/go/node/client/v1beta3/mocks/tx_client.go new file mode 100644 index 00000000..3cef0ce3 --- /dev/null +++ b/go/node/client/v1beta3/mocks/tx_client.go @@ -0,0 +1,113 @@ +// Code generated by mockery v2.42.0. DO NOT EDIT. + +package mocks + +import ( + context "context" + + types "github.com/cosmos/cosmos-sdk/types" + mock "github.com/stretchr/testify/mock" + + v1beta3 "github.com/akash-network/akash-api/go/node/client/v1beta3" +) + +// TxClient is an autogenerated mock type for the TxClient type +type TxClient struct { + mock.Mock +} + +type TxClient_Expecter struct { + mock *mock.Mock +} + +func (_m *TxClient) EXPECT() *TxClient_Expecter { + return &TxClient_Expecter{mock: &_m.Mock} +} + +// Broadcast provides a mock function with given fields: _a0, _a1, _a2 +func (_m *TxClient) Broadcast(_a0 context.Context, _a1 []types.Msg, _a2 ...v1beta3.BroadcastOption) (interface{}, error) { + _va := make([]interface{}, len(_a2)) + for _i := range _a2 { + _va[_i] = _a2[_i] + } + var _ca []interface{} + _ca = append(_ca, _a0, _a1) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) 
+ + if len(ret) == 0 { + panic("no return value specified for Broadcast") + } + + var r0 interface{} + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) (interface{}, error)); ok { + return rf(_a0, _a1, _a2...) + } + if rf, ok := ret.Get(0).(func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) interface{}); ok { + r0 = rf(_a0, _a1, _a2...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(interface{}) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) error); ok { + r1 = rf(_a0, _a1, _a2...) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// TxClient_Broadcast_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Broadcast' +type TxClient_Broadcast_Call struct { + *mock.Call +} + +// Broadcast is a helper method to define mock.On call +// - _a0 context.Context +// - _a1 []types.Msg +// - _a2 ...v1beta3.BroadcastOption +func (_e *TxClient_Expecter) Broadcast(_a0 interface{}, _a1 interface{}, _a2 ...interface{}) *TxClient_Broadcast_Call { + return &TxClient_Broadcast_Call{Call: _e.mock.On("Broadcast", + append([]interface{}{_a0, _a1}, _a2...)...)} +} + +func (_c *TxClient_Broadcast_Call) Run(run func(_a0 context.Context, _a1 []types.Msg, _a2 ...v1beta3.BroadcastOption)) *TxClient_Broadcast_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]v1beta3.BroadcastOption, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(v1beta3.BroadcastOption) + } + } + run(args[0].(context.Context), args[1].([]types.Msg), variadicArgs...) 
+ }) + return _c +} + +func (_c *TxClient_Broadcast_Call) Return(_a0 interface{}, _a1 error) *TxClient_Broadcast_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *TxClient_Broadcast_Call) RunAndReturn(run func(context.Context, []types.Msg, ...v1beta3.BroadcastOption) (interface{}, error)) *TxClient_Broadcast_Call { + _c.Call.Return(run) + return _c +} + +// NewTxClient creates a new instance of TxClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewTxClient(t interface { + mock.TestingT + Cleanup(func()) +}) *TxClient { + mock := &TxClient{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/go/node/client/v1beta3/node.go b/go/node/client/v1beta3/node.go new file mode 100644 index 00000000..a0461993 --- /dev/null +++ b/go/node/client/v1beta3/node.go @@ -0,0 +1,34 @@ +package v1beta3 + +import ( + "context" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + rpcclient "github.com/tendermint/tendermint/rpc/client" + tmrpc "github.com/tendermint/tendermint/rpc/core/types" +) + +var _ NodeClient = (*node)(nil) + +type node struct { + rpc rpcclient.Client +} + +func newNode(cctx sdkclient.Context) *node { + nd := &node{ + rpc: cctx.Client, + } + + return nd +} + +func (nd *node) SyncInfo(ctx context.Context) (*tmrpc.SyncInfo, error) { + status, err := nd.rpc.Status(ctx) + if err != nil { + return nil, err + } + + info := status.SyncInfo + + return &info, nil +} diff --git a/go/node/client/v1beta3/options.go b/go/node/client/v1beta3/options.go new file mode 100644 index 00000000..3ace8f1e --- /dev/null +++ b/go/node/client/v1beta3/options.go @@ -0,0 +1,7 @@ +package v1beta3 + +type ClientOptions struct { + tclient TxClient // nolint: unused +} + +type ClientOption func(*ClientOptions) *ClientOptions diff --git a/go/node/client/v1beta3/query.go b/go/node/client/v1beta3/query.go 
new file mode 100644 index 00000000..ca90bf5a --- /dev/null +++ b/go/node/client/v1beta3/query.go @@ -0,0 +1,277 @@ +package v1beta3 + +import ( + "context" + + "google.golang.org/grpc" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + authtypes "github.com/cosmos/cosmos-sdk/x/auth/types" + "github.com/cosmos/cosmos-sdk/x/authz" + banktypes "github.com/cosmos/cosmos-sdk/x/bank/types" + disttypes "github.com/cosmos/cosmos-sdk/x/distribution/types" + evdtypes "github.com/cosmos/cosmos-sdk/x/evidence/types" + feegranttypes "github.com/cosmos/cosmos-sdk/x/feegrant" + govtypes "github.com/cosmos/cosmos-sdk/x/gov/types" + minttypes "github.com/cosmos/cosmos-sdk/x/mint/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types/proposal" + slashtypes "github.com/cosmos/cosmos-sdk/x/slashing/types" + staketypes "github.com/cosmos/cosmos-sdk/x/staking/types" + upgradetypes "github.com/cosmos/cosmos-sdk/x/upgrade/types" + + atypes "github.com/akash-network/akash-api/go/node/audit/v1beta4" + ctypes "github.com/akash-network/akash-api/go/node/cert/v1beta3" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + mtypes "github.com/akash-network/akash-api/go/node/market/v1beta5" + ptypes "github.com/akash-network/akash-api/go/node/provider/v1beta4" +) + +var _ QueryClient = (*queryClient)(nil) + +type sdkQueryClient struct { + auth authtypes.QueryClient + authz authz.QueryClient + bank banktypes.QueryClient + distr disttypes.QueryClient + evidence evdtypes.QueryClient + feegrant feegranttypes.QueryClient + gov govtypes.QueryClient + mint minttypes.QueryClient + params paramtypes.QueryClient + slashing slashtypes.QueryClient + staking staketypes.QueryClient + upgrade upgradetypes.QueryClient +} + +type queryClient struct { + dclient dtypes.QueryClient + mclient mtypes.QueryClient + pclient ptypes.QueryClient + aclient atypes.QueryClient + cclient ctypes.QueryClient + sdk sdkQueryClient + cctx sdkclient.Context +} + +// NewQueryClient creates new query 
client instance based on a Cosmos SDK client context. +func NewQueryClient(cctx sdkclient.Context) QueryClient { + return newQueryClient(cctx) +} + +func newQueryClient(cctx sdkclient.Context) *queryClient { + return &queryClient{ + dclient: dtypes.NewQueryClient(cctx), + mclient: mtypes.NewQueryClient(cctx), + pclient: ptypes.NewQueryClient(cctx), + aclient: atypes.NewQueryClient(cctx), + cclient: ctypes.NewQueryClient(cctx), + sdk: sdkQueryClient{ + auth: authtypes.NewQueryClient(cctx), + authz: authz.NewQueryClient(cctx), + bank: banktypes.NewQueryClient(cctx), + distr: disttypes.NewQueryClient(cctx), + evidence: evdtypes.NewQueryClient(cctx), + feegrant: feegranttypes.NewQueryClient(cctx), + gov: govtypes.NewQueryClient(cctx), + mint: minttypes.NewQueryClient(cctx), + params: paramtypes.NewQueryClient(cctx), + slashing: slashtypes.NewQueryClient(cctx), + staking: staketypes.NewQueryClient(cctx), + upgrade: upgradetypes.NewQueryClient(cctx), + }, + cctx: cctx, + } +} + +// ClientContext returns the client's Cosmos SDK client context. +func (c *queryClient) ClientContext() sdkclient.Context { + return c.cctx +} + +// Deployments queries deployments. +func (c *queryClient) Deployments(ctx context.Context, in *dtypes.QueryDeploymentsRequest, opts ...grpc.CallOption) (*dtypes.QueryDeploymentsResponse, error) { + if c.dclient == nil { + return &dtypes.QueryDeploymentsResponse{}, ErrClientNotFound + } + return c.dclient.Deployments(ctx, in, opts...) +} + +// Deployment queries a deployment. +func (c *queryClient) Deployment(ctx context.Context, in *dtypes.QueryDeploymentRequest, opts ...grpc.CallOption) (*dtypes.QueryDeploymentResponse, error) { + if c.dclient == nil { + return &dtypes.QueryDeploymentResponse{}, ErrClientNotFound + } + return c.dclient.Deployment(ctx, in, opts...) +} + +// Group queries a group. 
+func (c *queryClient) Group(ctx context.Context, in *dtypes.QueryGroupRequest, opts ...grpc.CallOption) (*dtypes.QueryGroupResponse, error) { + if c.dclient == nil { + return &dtypes.QueryGroupResponse{}, ErrClientNotFound + } + return c.dclient.Group(ctx, in, opts...) +} + +// Orders queries orders. +func (c *queryClient) Orders(ctx context.Context, in *mtypes.QueryOrdersRequest, opts ...grpc.CallOption) (*mtypes.QueryOrdersResponse, error) { + if c.mclient == nil { + return &mtypes.QueryOrdersResponse{}, ErrClientNotFound + } + return c.mclient.Orders(ctx, in, opts...) +} + +// Order queries an order. +func (c *queryClient) Order(ctx context.Context, in *mtypes.QueryOrderRequest, opts ...grpc.CallOption) (*mtypes.QueryOrderResponse, error) { + if c.mclient == nil { + return &mtypes.QueryOrderResponse{}, ErrClientNotFound + } + return c.mclient.Order(ctx, in, opts...) +} + +// Bids queries bids. +func (c *queryClient) Bids(ctx context.Context, in *mtypes.QueryBidsRequest, opts ...grpc.CallOption) (*mtypes.QueryBidsResponse, error) { + if c.mclient == nil { + return &mtypes.QueryBidsResponse{}, ErrClientNotFound + } + return c.mclient.Bids(ctx, in, opts...) +} + +// Bid queries a specific bid. +func (c *queryClient) Bid(ctx context.Context, in *mtypes.QueryBidRequest, opts ...grpc.CallOption) (*mtypes.QueryBidResponse, error) { + if c.mclient == nil { + return &mtypes.QueryBidResponse{}, ErrClientNotFound + } + return c.mclient.Bid(ctx, in, opts...) +} + +// Leases queries leases. +func (c *queryClient) Leases(ctx context.Context, in *mtypes.QueryLeasesRequest, opts ...grpc.CallOption) (*mtypes.QueryLeasesResponse, error) { + if c.mclient == nil { + return &mtypes.QueryLeasesResponse{}, ErrClientNotFound + } + return c.mclient.Leases(ctx, in, opts...) +} + +// Lease queries a lease. 
+func (c *queryClient) Lease(ctx context.Context, in *mtypes.QueryLeaseRequest, opts ...grpc.CallOption) (*mtypes.QueryLeaseResponse, error) {
+	if c.mclient == nil {
+		return &mtypes.QueryLeaseResponse{}, ErrClientNotFound
+	}
+	return c.mclient.Lease(ctx, in, opts...)
+}
+
+// Providers queries providers.
+func (c *queryClient) Providers(ctx context.Context, in *ptypes.QueryProvidersRequest, opts ...grpc.CallOption) (*ptypes.QueryProvidersResponse, error) {
+	if c.pclient == nil {
+		return &ptypes.QueryProvidersResponse{}, ErrClientNotFound
+	}
+	return c.pclient.Providers(ctx, in, opts...)
+}
+
+// Provider queries a provider.
+func (c *queryClient) Provider(ctx context.Context, in *ptypes.QueryProviderRequest, opts ...grpc.CallOption) (*ptypes.QueryProviderResponse, error) {
+	if c.pclient == nil {
+		return &ptypes.QueryProviderResponse{}, ErrClientNotFound
+	}
+	return c.pclient.Provider(ctx, in, opts...)
+}
+
+// AllProvidersAttributes queries all providers.
+func (c *queryClient) AllProvidersAttributes(ctx context.Context, in *atypes.QueryAllProvidersAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) {
+	if c.aclient == nil {
+		return &atypes.QueryProvidersResponse{}, ErrClientNotFound
+	}
+	return c.aclient.AllProvidersAttributes(ctx, in, opts...)
+}
+
+// ProviderAttributes queries all provider signed attributes.
+func (c *queryClient) ProviderAttributes(ctx context.Context, in *atypes.QueryProviderAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) {
+	if c.aclient == nil {
+		return &atypes.QueryProvidersResponse{}, ErrClientNotFound
+	}
+	return c.aclient.ProviderAttributes(ctx, in, opts...)
+}
+
+// ProviderAuditorAttributes queries provider signed attributes by specific validator.
+func (c *queryClient) ProviderAuditorAttributes(ctx context.Context, in *atypes.QueryProviderAuditorRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) {
+	if c.aclient == nil {
+		return &atypes.QueryProvidersResponse{}, ErrClientNotFound
+	}
+	return c.aclient.ProviderAuditorAttributes(ctx, in, opts...)
+}
+
+// AuditorAttributes queries all providers signed by this validator.
+func (c *queryClient) AuditorAttributes(ctx context.Context, in *atypes.QueryAuditorAttributesRequest, opts ...grpc.CallOption) (*atypes.QueryProvidersResponse, error) {
+	if c.aclient == nil {
+		return &atypes.QueryProvidersResponse{}, ErrClientNotFound
+	}
+	return c.aclient.AuditorAttributes(ctx, in, opts...)
+}
+
+// Certificates queries certificates.
+func (c *queryClient) Certificates(ctx context.Context, in *ctypes.QueryCertificatesRequest, opts ...grpc.CallOption) (*ctypes.QueryCertificatesResponse, error) {
+	if c.cclient == nil {
+		return &ctypes.QueryCertificatesResponse{}, ErrClientNotFound
+	}
+	return c.cclient.Certificates(ctx, in, opts...)
+}
+
+// Auth implements QueryClient by returning the auth Cosmos SDK query client.
+func (c *queryClient) Auth() authtypes.QueryClient {
+	return c.sdk.auth
+}
+
+// Authz implements QueryClient by returning the authz Cosmos SDK query client.
+func (c *queryClient) Authz() authz.QueryClient {
+	return c.sdk.authz
+}
+
+// Bank implements QueryClient by returning the bank Cosmos SDK query client.
+func (c *queryClient) Bank() banktypes.QueryClient {
+	return c.sdk.bank
+}
+
+// Distribution implements QueryClient by returning the distribution Cosmos SDK query client.
+func (c *queryClient) Distribution() disttypes.QueryClient {
+	return c.sdk.distr
+}
+
+// Evidence implements QueryClient by returning the evidence Cosmos SDK query client.
+func (c *queryClient) Evidence() evdtypes.QueryClient {
+	return c.sdk.evidence
+}
+
+// Feegrant implements QueryClient by returning the feegrant Cosmos SDK query client.
+func (c *queryClient) Feegrant() feegranttypes.QueryClient { + return c.sdk.feegrant +} + +// Gov implements QueryClient by returning the governance Cosmos SDK query client. +func (c *queryClient) Gov() govtypes.QueryClient { + return c.sdk.gov +} + +// Mint implements QueryClient by returning the mint Cosmos SDK query client. +func (c *queryClient) Mint() minttypes.QueryClient { + return c.sdk.mint +} + +// Params implements QueryClient by returning the params Cosmos SDK query client. +func (c *queryClient) Params() paramtypes.QueryClient { + return c.sdk.params +} + +// Slashing implements QueryClient by returning the slashing Cosmos SDK query client. +func (c *queryClient) Slashing() slashtypes.QueryClient { + return c.sdk.slashing +} + +// Staking implements QueryClient by returning the staking Cosmos SDK query client. +func (c *queryClient) Staking() staketypes.QueryClient { + return c.sdk.staking +} + +// Upgrade implements QueryClient by returning the upgrade Cosmos SDK query client. 
+func (c *queryClient) Upgrade() upgradetypes.QueryClient { + return c.sdk.upgrade +} diff --git a/go/node/client/v1beta3/tx.go b/go/node/client/v1beta3/tx.go new file mode 100644 index 00000000..b5c06d77 --- /dev/null +++ b/go/node/client/v1beta3/tx.go @@ -0,0 +1,650 @@ +package v1beta3 + +import ( + "bufio" + "context" + "encoding/hex" + "errors" + "fmt" + "os" + "strings" + "time" + "unsafe" + + "github.com/boz/go-lifecycle" + "github.com/cosmos/cosmos-sdk/client/flags" + "github.com/cosmos/cosmos-sdk/client/input" + "github.com/edwingeng/deque/v2" + "github.com/gogo/protobuf/proto" + + "github.com/tendermint/tendermint/libs/log" + ttypes "github.com/tendermint/tendermint/types" + + sdkclient "github.com/cosmos/cosmos-sdk/client" + "github.com/cosmos/cosmos-sdk/client/tx" + "github.com/cosmos/cosmos-sdk/crypto/keyring" + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + authtx "github.com/cosmos/cosmos-sdk/x/auth/tx" + + cltypes "github.com/akash-network/akash-api/go/node/client/types" + "github.com/akash-network/akash-api/go/util/ctxlog" +) + +var ( + ErrNotRunning = errors.New("tx client: not running") + ErrSyncTimedOut = errors.New("tx client: timed-out waiting for sequence sync") + ErrNodeCatchingUp = errors.New("tx client: cannot sync from catching up node") + ErrSimulateOffline = errors.New("tx client: cannot simulate tx in offline mode") + ErrBroadcastOffline = errors.New("tx client: cannot broadcast tx in offline mode") + ErrTxCanceledByUser = errors.New("tx client: transaction declined by user input") +) + +const ( + BroadcastDefaultTimeout = 30 * time.Second + BroadcastBlockRetryTimeout = 300 * time.Second + broadcastBlockRetryPeriod = time.Second + sequenceSyncTimeout = 30 * time.Second + + // sadface. + + // Only way to detect the timeout error. 
+ // https://github.com/tendermint/tendermint/blob/46e06c97320bc61c4d98d3018f59d47ec69863c9/rpc/core/mempool.go#L124 + timeoutErrorMessage = "timed out waiting for tx to be included in a block" + + // Only way to check for tx not found error. + // https://github.com/tendermint/tendermint/blob/46e06c97320bc61c4d98d3018f59d47ec69863c9/rpc/core/tx.go#L31-L33 + notFoundErrorMessageSuffix = ") not found" +) + +var _ TxClient = (*serialBroadcaster)(nil) + +type ConfirmFn func(string) (bool, error) + +// BroadcastOptions defines the options allowed to configure a transaction broadcast. +type BroadcastOptions struct { + timeoutHeight *uint64 + gasAdjustment *float64 + gas *flags.GasSetting + gasPrices *string + fees *string + note *string + broadcastTimeout *time.Duration + resultAsError bool + skipConfirm *bool + confirmFn ConfirmFn +} + +// BroadcastOption is a function that takes as first argument a pointer to BroadcastOptions and returns an error +// if the option cannot be configured. A number of BroadcastOption functions are available in this package. +type BroadcastOption func(*BroadcastOptions) error + +// WithGasAdjustment returns a BroadcastOption that sets the gas adjustment configuration for the transaction. +func WithGasAdjustment(val float64) BroadcastOption { + return func(options *BroadcastOptions) error { + options.gasAdjustment = new(float64) + *options.gasAdjustment = val + return nil + } +} + +// WithNote returns a BroadcastOption that sets the note configuration for the transaction. +func WithNote(val string) BroadcastOption { + return func(options *BroadcastOptions) error { + options.note = new(string) + *options.note = val + return nil + } +} + +// WithGas returns a BroadcastOption that sets the gas setting configuration for the transaction. 
+func WithGas(val flags.GasSetting) BroadcastOption { + return func(options *BroadcastOptions) error { + options.gas = new(flags.GasSetting) + *options.gas = val + return nil + } +} + +// WithGasPrices returns a BroadcastOption that sets the gas price configuration for the transaction. +// Gas price is a string of the amount. E.g. "0.25uakt". +func WithGasPrices(val string) BroadcastOption { + return func(options *BroadcastOptions) error { + options.gasPrices = new(string) + *options.gasPrices = val + return nil + } +} + +// WithFees returns a BroadcastOption that sets the fees configuration for the transaction. +func WithFees(val string) BroadcastOption { + return func(options *BroadcastOptions) error { + options.fees = new(string) + *options.fees = val + return nil + } +} + +// WithTimeoutHeight returns a BroadcastOption that sets the timeout height configuration for the transaction. +func WithTimeoutHeight(val uint64) BroadcastOption { + return func(options *BroadcastOptions) error { + options.timeoutHeight = new(uint64) + *options.timeoutHeight = val + return nil + } +} + +// WithResultCodeAsError returns a BroadcastOption that enables the result code as error configuration for the transaction. +func WithResultCodeAsError() BroadcastOption { + return func(opts *BroadcastOptions) error { + opts.resultAsError = true + return nil + } +} + +// WithSkipConfirm returns a BroadcastOption that sets whether to skip or not the confirmation for the transaction. +func WithSkipConfirm(val bool) BroadcastOption { + return func(opts *BroadcastOptions) error { + opts.skipConfirm = new(bool) + *opts.skipConfirm = val + return nil + } +} + +// WithConfirmFn returns a BroadcastOption that sets the ConfirmFn function configuration for the transaction. 
+func WithConfirmFn(val ConfirmFn) BroadcastOption { + return func(opts *BroadcastOptions) error { + opts.confirmFn = val + return nil + } +} + +type broadcastResp struct { + resp interface{} + err error +} + +type broadcastReq struct { + id uintptr + responsech chan<- broadcastResp + msgs []sdk.Msg + opts *BroadcastOptions +} +type broadcastTxs struct { + msgs []sdk.Msg + opts *BroadcastOptions +} + +type seqResp struct { + seq uint64 + err error +} + +type seqReq struct { + curr uint64 + ch chan<- seqResp +} + +type broadcast struct { + donech chan<- error + respch chan<- broadcastResp + msgs []sdk.Msg + opts *BroadcastOptions +} + +type serialBroadcaster struct { + ctx context.Context + cctx sdkclient.Context + info keyring.Info + reqch chan broadcastReq + broadcastch chan broadcast + seqreqch chan seqReq + lc lifecycle.Lifecycle + nd *node + log log.Logger +} + +func newSerialTx(ctx context.Context, cctx sdkclient.Context, nd *node, opts ...cltypes.ClientOption) (*serialBroadcaster, error) { + txf, err := cltypes.NewTxFactory(cctx, opts...) + if err != nil { + return nil, err + } + + keyname := cctx.GetFromName() + info, err := txf.Keybase().Key(keyname) + if err != nil { + info, err = txf.Keybase().KeyByAddress(cctx.GetFromAddress()) + } + + if err != nil { + return nil, err + } + + client := &serialBroadcaster{ + ctx: ctx, + cctx: cctx, + info: info, + lc: lifecycle.New(), + reqch: make(chan broadcastReq, 1), + broadcastch: make(chan broadcast, 1), + seqreqch: make(chan seqReq), + nd: nd, + log: ctxlog.Logger(ctx).With("cmp", "client/broadcaster"), + } + + go client.lc.WatchContext(ctx) + go client.run() + go client.broadcaster(txf) + + if !client.cctx.Offline { + go client.sequenceSync() + } + + return client, nil +} + +// Broadcast broadcasts a transaction. A transaction is composed of 1 or many messages. This allows several +// operations to be performed in a single transaction. 
+// A transaction broadcast can be configured with an arbitrary number of BroadcastOption. +// This method returns the response as an interface{} instance. If an error occurs when preparing the transaction +// an error is returned. +// A transaction can fail with a given "transaction code" which will not be passed to the error value. +// This needs to be checked by the caller and handled accordingly. +func (c *serialBroadcaster) Broadcast(ctx context.Context, msgs []sdk.Msg, opts ...BroadcastOption) (interface{}, error) { + bOpts := &BroadcastOptions{ + confirmFn: defaultTxConfirm, + } + + for _, opt := range opts { + if err := opt(bOpts); err != nil { + return nil, err + } + } + + if bOpts.broadcastTimeout == nil { + bOpts.broadcastTimeout = new(time.Duration) + *bOpts.broadcastTimeout = BroadcastDefaultTimeout + } + + responsech := make(chan broadcastResp, 1) + request := broadcastReq{ + responsech: responsech, + msgs: msgs, + opts: bOpts, + } + + request.id = uintptr(unsafe.Pointer(&request)) + + select { + case c.reqch <- request: + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.lc.ShuttingDown(): + return nil, ErrNotRunning + } + + select { + case resp := <-responsech: + // if returned error is sdk error, it is likely to be wrapped response so discard it + // as clients supposed to check Tx code, unless resp is nil, which is error during Tx preparation + if !errors.As(resp.err, &sdkerrors.Error{}) || resp.resp == nil || bOpts.resultAsError { + return resp.resp, resp.err + } + return resp.resp, nil + case <-ctx.Done(): + return nil, ctx.Err() + case <-c.lc.ShuttingDown(): + return nil, ErrNotRunning + } +} + +func (c *serialBroadcaster) run() { + defer c.lc.ShutdownCompleted() + + pending := deque.NewDeque[broadcastReq]() + broadcastCh := c.broadcastch + broadcastDoneCh := make(chan error, 1) + + tryBroadcast := func() { + if pending.Len() == 0 { + return + } + + req := pending.Peek(0) + + select { + case broadcastCh <- broadcast{ + donech: 
broadcastDoneCh, + respch: req.responsech, + msgs: req.msgs, + opts: req.opts, + }: + broadcastCh = nil + _ = pending.PopFront() + default: + } + } + +loop: + for { + select { + case err := <-c.lc.ShutdownRequest(): + c.lc.ShutdownInitiated(err) + break loop + case req := <-c.reqch: + pending.PushBack(req) + + tryBroadcast() + case err := <-broadcastDoneCh: + broadcastCh = c.broadcastch + + if err != nil { + c.log.Error("unable to broadcast messages", "error", err) + } + tryBroadcast() + } + } +} + +func deriveTxfFromOptions(txf tx.Factory, opts *BroadcastOptions) tx.Factory { + if opt := opts.note; opt != nil { + txf = txf.WithMemo(*opt) + } + + if opt := opts.gas; opt != nil { + txf = txf.WithGas(opt.Gas).WithSimulateAndExecute(opt.Simulate) + } + + if opt := opts.fees; opt != nil { + txf = txf.WithFees(*opt) + } + + if opt := opts.gasPrices; opt != nil { + txf = txf.WithGasPrices(*opt) + } + + if opt := opts.timeoutHeight; opt != nil { + txf = txf.WithTimeoutHeight(*opt) + } + + if opt := opts.gasAdjustment; opt != nil { + txf = txf.WithGasAdjustment(*opt) + } + + return txf +} + +func (c *serialBroadcaster) broadcaster(ptxf tx.Factory) { + syncSequence := func(f tx.Factory, rErr error) (uint64, bool) { + if rErr != nil { + if sdkerrors.ErrWrongSequence.Is(rErr) { + // attempt to sync account sequence + if rSeq, err := c.syncAccountSequence(f.Sequence()); err == nil { + return rSeq, true + } + + return f.Sequence(), true + } + } + + return f.Sequence(), false + } + + for { + select { + case <-c.lc.ShuttingDown(): + return + case req := <-c.broadcastch: + var err error + var resp interface{} + + done: + for i := 0; i < 2; i++ { + txf := deriveTxfFromOptions(ptxf, req.opts) + if c.cctx.GenerateOnly { + resp, err = c.generateTxs(txf, req.msgs...) 
+ break done + } + + var rseq uint64 + txs := broadcastTxs{ + msgs: req.msgs, + opts: req.opts, + } + + resp, rseq, err = c.broadcastTxs(txf, txs) + ptxf = ptxf.WithSequence(rseq) + + rSeq, synced := syncSequence(ptxf, err) + ptxf = ptxf.WithSequence(rSeq) + + if !synced { + break done + } + } + + req.respch <- broadcastResp{ + resp: resp, + err: err, + } + + terr := &sdkerrors.Error{} + if !c.cctx.GenerateOnly && errors.Is(err, terr) { + rSeq, _ := syncSequence(ptxf, err) + ptxf = ptxf.WithSequence(rSeq) + } + + select { + case <-c.lc.ShuttingDown(): + return + case req.donech <- err: + } + } + } +} + +func (c *serialBroadcaster) sequenceSync() { + for { + select { + case <-c.lc.ShuttingDown(): + return + case req := <-c.seqreqch: + // reply back with current value if any error to occur + seq := seqResp{ + seq: req.curr, + } + + ndStatus, err := c.nd.SyncInfo(c.ctx) + if err != nil { + c.log.Error("cannot obtain node status to sync account sequence", "err", err) + seq.err = err + } + + if err == nil && ndStatus.CatchingUp { + c.log.Error("cannot sync account sequence from node that is catching up") + err = ErrNodeCatchingUp + } + + if err == nil { + // query sequence number + if _, seq.seq, err = c.cctx.AccountRetriever.GetAccountNumberSequence(c.cctx, c.info.GetAddress()); err != nil { + c.log.Error("error requesting account", "err", err) + seq.err = err + } + } + + select { + case req.ch <- seq: + case <-c.lc.ShuttingDown(): + } + } + } +} + +func (c *serialBroadcaster) generateTxs(txf tx.Factory, msgs ...sdk.Msg) ([]byte, error) { + if txf.SimulateAndExecute() { + if c.cctx.Offline { + return nil, ErrSimulateOffline + } + + _, adjusted, err := tx.CalculateGas(c.cctx, txf, msgs...) + if err != nil { + return nil, err + } + + txf = txf.WithGas(adjusted) + } + + utx, err := tx.BuildUnsignedTx(txf, msgs...) 
+ if err != nil { + return nil, err + } + + data, err := c.cctx.TxConfig.TxJSONEncoder()(utx.GetTx()) + if err != nil { + return nil, err + } + + return data, nil +} + +func defaultTxConfirm(txn string) (bool, error) { + _, _ = fmt.Printf("%s\n\n", txn) + + buf := bufio.NewReader(os.Stdin) + + return input.GetConfirmation("confirm transaction before signing and broadcasting", buf, os.Stdin) +} + +func (c *serialBroadcaster) broadcastTxs(txf tx.Factory, txs broadcastTxs) (interface{}, uint64, error) { + var err error + var resp proto.Message + + if txf.SimulateAndExecute() || c.cctx.Simulate { + var adjusted uint64 + resp, adjusted, err = tx.CalculateGas(c.cctx, txf, txs.msgs...) + if err != nil { + return nil, txf.Sequence(), err + } + + txf = txf.WithGas(adjusted) + } + + if c.cctx.Simulate { + return resp, txf.Sequence(), nil + } + + txn, err := tx.BuildUnsignedTx(txf, txs.msgs...) + if err != nil { + return nil, txf.Sequence(), err + } + + if c.cctx.Offline { + return nil, txf.Sequence(), ErrBroadcastOffline + } + + if !c.cctx.SkipConfirm { + out, err := c.cctx.TxConfig.TxJSONEncoder()(txn.GetTx()) + if err != nil { + return nil, txf.Sequence(), err + } + + isYes, err := txs.opts.confirmFn(string(out)) + if err != nil { + return nil, txf.Sequence(), err + } + + if !isYes { + return nil, txf.Sequence(), ErrTxCanceledByUser + } + } + + txn.SetFeeGranter(c.cctx.GetFeeGranterAddress()) + + err = tx.Sign(txf, c.info.GetName(), txn, true) + if err != nil { + return nil, txf.Sequence(), err + } + + bytes, err := c.cctx.TxConfig.TxEncoder()(txn.GetTx()) + if err != nil { + return nil, txf.Sequence(), err + } + + response, err := c.doBroadcast(c.cctx, bytes, *txs.opts.broadcastTimeout) + if err != nil { + return response, txf.Sequence(), err + } + + txf = txf.WithSequence(txf.Sequence() + 1) + + if response.Code != 0 { + return response, txf.Sequence(), sdkerrors.ABCIError(response.Codespace, response.Code, response.RawLog) + } + + return response, txf.Sequence(), nil +} 
+ +func (c *serialBroadcaster) syncAccountSequence(lSeq uint64) (uint64, error) { + ch := make(chan seqResp, 1) + + c.seqreqch <- seqReq{ + curr: lSeq, + ch: ch, + } + + ctx, cancel := context.WithTimeout(c.ctx, sequenceSyncTimeout) + defer cancel() + + select { + case rSeq := <-ch: + return rSeq.seq, rSeq.err + case <-ctx.Done(): + return lSeq, ErrSyncTimedOut + case <-c.lc.ShuttingDown(): + return lSeq, ErrNotRunning + } +} + +func (c *serialBroadcaster) doBroadcast(cctx sdkclient.Context, data []byte, timeout time.Duration) (*sdk.TxResponse, error) { + txb := ttypes.Tx(data) + hash := hex.EncodeToString(txb.Hash()) + + // broadcast-mode=block + // submit with mode commit/block + cres, err := cctx.BroadcastTxCommit(txb) + if err == nil { + // good job + return cres, nil + } else if !strings.HasSuffix(err.Error(), timeoutErrorMessage) { + return cres, err + } + + // timeout error, continue on to retry + // loop + lctx, cancel := context.WithTimeout(c.ctx, timeout) + defer cancel() + + for lctx.Err() == nil { + // wait up to one second + select { + case <-lctx.Done(): + return cres, err + case <-time.After(broadcastBlockRetryPeriod): + } + + // check transaction + // https://github.com/cosmos/cosmos-sdk/pull/8734 + res, err := authtx.QueryTx(cctx, hash) + if err == nil { + return res, nil + } + + // if it's not a "not found" error, return + if !strings.HasSuffix(err.Error(), notFoundErrorMessageSuffix) { + return res, err + } + } + + return cres, lctx.Err() +} diff --git a/go/node/deployment/v1beta3/errors.go b/go/node/deployment/v1beta3/errors.go index 55a61678..cd6956ca 100644 --- a/go/node/deployment/v1beta3/errors.go +++ b/go/node/deployment/v1beta3/errors.go @@ -1,81 +1,55 @@ package v1beta3 import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errNameDoesNotExist uint32 = iota + 1 - errInvalidRequest - errDeploymentExists - errDeploymentNotFound - errDeploymentClosed - errOwnerAcctMissing - errInvalidGroups - errInvalidDeploymentID 
- errEmptyVersion - errInvalidVersion - errInternal - errInvalidDeployment - errInvalidGroupID - errGroupNotFound - errGroupClosed - errGroupOpen - errGroupPaused - errGroupNotOpen - errGroupSpec - errInvalidDeposit - errInvalidIDPath - errInvalidParam - errInvalidDeploymentDepositor + "errors" ) var ( // ErrNameDoesNotExist is the error when name does not exist - ErrNameDoesNotExist = sdkerrors.Register(ModuleName, errNameDoesNotExist, "Name does not exist") + ErrNameDoesNotExist = errors.New("name does not exist") // ErrInvalidRequest is the error for invalid request - ErrInvalidRequest = sdkerrors.Register(ModuleName, errInvalidRequest, "Invalid request") + ErrInvalidRequest = errors.New("invalid request") // ErrDeploymentExists is the error when already deployment exists - ErrDeploymentExists = sdkerrors.Register(ModuleName, errDeploymentExists, "Deployment exists") + ErrDeploymentExists = errors.New("deployment exists") // ErrDeploymentNotFound is the error when deployment not found - ErrDeploymentNotFound = sdkerrors.Register(ModuleName, errDeploymentNotFound, "Deployment not found") + ErrDeploymentNotFound = errors.New("deployment not found") // ErrDeploymentClosed is the error when deployment is closed - ErrDeploymentClosed = sdkerrors.Register(ModuleName, errDeploymentClosed, "Deployment closed") + ErrDeploymentClosed = errors.New("deployment closed") // ErrOwnerAcctMissing is the error for owner account missing - ErrOwnerAcctMissing = sdkerrors.Register(ModuleName, errOwnerAcctMissing, "Owner account missing") + ErrOwnerAcctMissing = errors.New("owner account missing") // ErrInvalidGroups is the error when groups are empty - ErrInvalidGroups = sdkerrors.Register(ModuleName, errInvalidGroups, "Invalid groups") + ErrInvalidGroups = errors.New("invalid groups") // ErrInvalidDeploymentID is the error for invalid deployment id - ErrInvalidDeploymentID = sdkerrors.Register(ModuleName, errInvalidDeploymentID, "Invalid: deployment id") + ErrInvalidDeploymentID = 
errors.New("invalid: deployment id") // ErrEmptyVersion is the error when version is empty - ErrEmptyVersion = sdkerrors.Register(ModuleName, errEmptyVersion, "Invalid: empty version") + ErrEmptyVersion = errors.New("invalid: empty version") // ErrInvalidVersion is the error when version is invalid - ErrInvalidVersion = sdkerrors.Register(ModuleName, errInvalidVersion, "Invalid: deployment version") + ErrInvalidVersion = errors.New("invalid: deployment version") // ErrInternal is the error for internal error - ErrInternal = sdkerrors.Register(ModuleName, errInternal, "internal error") + ErrInternal = errors.New("internal error") // ErrInvalidDeployment = is the error when deployment does not pass validation - ErrInvalidDeployment = sdkerrors.Register(ModuleName, errInvalidDeployment, "Invalid deployment") + ErrInvalidDeployment = errors.New("invalid deployment") // ErrInvalidGroupID is the error when already deployment exists - ErrInvalidGroupID = sdkerrors.Register(ModuleName, errInvalidGroupID, "Deployment exists") + ErrInvalidGroupID = errors.New("deployment exists") // ErrGroupNotFound is the keeper's error for not finding a group - ErrGroupNotFound = sdkerrors.Register(ModuleName, errGroupNotFound, "Group not found") + ErrGroupNotFound = errors.New("group not found") // ErrGroupClosed is the error when deployment is closed - ErrGroupClosed = sdkerrors.Register(ModuleName, errGroupClosed, "Group already closed") + ErrGroupClosed = errors.New("group already closed") // ErrGroupOpen is the error when deployment is closed - ErrGroupOpen = sdkerrors.Register(ModuleName, errGroupOpen, "Group open") + ErrGroupOpen = errors.New("group open") // ErrGroupPaused is the error when deployment is closed - ErrGroupPaused = sdkerrors.Register(ModuleName, errGroupPaused, "Group paused") + ErrGroupPaused = errors.New("group paused") // ErrGroupNotOpen indicates the Group state has progressed beyond initial Open. 
- ErrGroupNotOpen = sdkerrors.Register(ModuleName, errGroupNotOpen, "Group not open") + ErrGroupNotOpen = errors.New("group not open") // ErrGroupSpecInvalid indicates a GroupSpec has invalid configuration - ErrGroupSpecInvalid = sdkerrors.Register(ModuleName, errGroupSpec, "GroupSpec invalid") + ErrGroupSpecInvalid = errors.New("groupSpec invalid") // ErrInvalidDeposit indicates an invalid deposit - ErrInvalidDeposit = sdkerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") + ErrInvalidDeposit = errors.New("deposit invalid") // ErrInvalidIDPath indicates an invalid ID path - ErrInvalidIDPath = sdkerrors.Register(ModuleName, errInvalidIDPath, "ID path invalid") + ErrInvalidIDPath = errors.New("ID path invalid") // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") + ErrInvalidParam = errors.New("parameter invalid") // ErrInvalidDeploymentDepositor indicates an invalid chain parameter - ErrInvalidDeploymentDepositor = sdkerrors.Register(ModuleName, errInvalidDeploymentDepositor, "invalid deployment depositor") + ErrInvalidDeploymentDepositor = errors.New("invalid deployment depositor") ) diff --git a/go/node/deployment/v1beta4/authz.pb.go b/go/node/deployment/v1beta4/authz.pb.go new file mode 100644 index 00000000..554458f5 --- /dev/null +++ b/go/node/deployment/v1beta4/authz.pb.go @@ -0,0 +1,333 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/authz.proto + +package v1beta4 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + _ "github.com/regen-network/cosmos-proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +// the granter's account for a deployment. +type DepositDeploymentAuthorization struct { + // SpendLimit is the amount the grantee is authorized to spend from the granter's account for + // the purpose of deployment. + SpendLimit types.Coin `protobuf:"bytes,1,opt,name=spend_limit,json=spendLimit,proto3" json:"spend_limit"` +} + +func (m *DepositDeploymentAuthorization) Reset() { *m = DepositDeploymentAuthorization{} } +func (m *DepositDeploymentAuthorization) String() string { return proto.CompactTextString(m) } +func (*DepositDeploymentAuthorization) ProtoMessage() {} +func (*DepositDeploymentAuthorization) Descriptor() ([]byte, []int) { + return fileDescriptor_37efa6d6cd05f40d, []int{0} +} +func (m *DepositDeploymentAuthorization) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DepositDeploymentAuthorization) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DepositDeploymentAuthorization.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DepositDeploymentAuthorization) XXX_Merge(src proto.Message) { + xxx_messageInfo_DepositDeploymentAuthorization.Merge(m, src) +} +func (m *DepositDeploymentAuthorization) XXX_Size() int { + return m.Size() +} +func (m *DepositDeploymentAuthorization) XXX_DiscardUnknown() { + xxx_messageInfo_DepositDeploymentAuthorization.DiscardUnknown(m) +} + +var 
xxx_messageInfo_DepositDeploymentAuthorization proto.InternalMessageInfo + +func (m *DepositDeploymentAuthorization) GetSpendLimit() types.Coin { + if m != nil { + return m.SpendLimit + } + return types.Coin{} +} + +func init() { + proto.RegisterType((*DepositDeploymentAuthorization)(nil), "akash.deployment.v1beta4.DepositDeploymentAuthorization") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/authz.proto", fileDescriptor_37efa6d6cd05f40d) +} + +var fileDescriptor_37efa6d6cd05f40d = []byte{ + // 280 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, + 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x4f, 0x2c, 0x2d, 0xc9, 0xa8, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x92, 0x00, 0xab, 0xd2, 0x43, 0xa8, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, + 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0x24, 0x93, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, 0xe3, + 0x21, 0x12, 0x10, 0x0e, 0x54, 0x4a, 0x0e, 0xc2, 0xd3, 0x4f, 0x4a, 0x2c, 0x4e, 0x85, 0xda, 0x65, + 0xa8, 0x9f, 0x9c, 0x9f, 0x99, 0x07, 0x91, 0x57, 0x6a, 0x61, 0xe4, 0x92, 0x73, 0x49, 0x2d, 0xc8, + 0x2f, 0xce, 0x2c, 0x71, 0x81, 0x5b, 0xe7, 0x58, 0x5a, 0x92, 0x91, 0x5f, 0x94, 0x59, 0x95, 0x58, + 0x92, 0x99, 0x9f, 0x27, 0xe4, 0xcf, 0xc5, 0x5d, 0x5c, 0x90, 0x9a, 0x97, 0x12, 0x9f, 0x93, 0x99, + 0x9b, 0x59, 0x22, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0x24, 0xa9, 0x07, 0xb5, 0x06, 0x64, 0x30, + 0xd4, 0x79, 0x86, 0x7a, 0xce, 0xf9, 0x99, 0x79, 0x4e, 0xc2, 0x27, 0xee, 0xc9, 0x33, 0xbc, 0xba, + 0x27, 0x8f, 0xac, 0x2b, 0x88, 0x0b, 0xcc, 0xf1, 0x01, 0xb1, 0xad, 0x04, 0x2f, 0x6d, 0xd1, 0xe5, + 0x45, 0xb1, 0xc3, 0x29, 0xfc, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, + 0x63, 0x9c, 0xf0, 0x58, 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x6c, + 0xd3, 0x33, 0x4b, 0x32, 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 
0xf5, 0xc1, 0xc1, 0xa2, 0x9b, 0x97, + 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, 0x16, 0x64, 0xea, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, + 0xa7, 0xa4, 0x62, 0x09, 0xd6, 0x24, 0x36, 0xb0, 0x37, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x13, 0x8f, 0x91, 0x8d, 0x79, 0x01, 0x00, 0x00, +} + +func (m *DepositDeploymentAuthorization) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DepositDeploymentAuthorization) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DepositDeploymentAuthorization) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.SpendLimit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAuthz(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintAuthz(dAtA []byte, offset int, v uint64) int { + offset -= sovAuthz(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DepositDeploymentAuthorization) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SpendLimit.Size() + n += 1 + l + sovAuthz(uint64(l)) + return n +} + +func sovAuthz(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAuthz(x uint64) (n int) { + return sovAuthz(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DepositDeploymentAuthorization) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuthz + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b 
< 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DepositDeploymentAuthorization: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DepositDeploymentAuthorization: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SpendLimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAuthz + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAuthz + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAuthz + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SpendLimit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAuthz(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAuthz + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAuthz(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuthz + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuthz + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + 
} + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAuthz + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAuthz + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAuthz + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAuthz + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAuthz = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAuthz = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAuthz = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/codec.go b/go/node/deployment/v1beta4/codec.go new file mode 100644 index 00000000..02f81694 --- /dev/null +++ b/go/node/deployment/v1beta4/codec.go @@ -0,0 +1,58 @@ +package v1beta4 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" + "github.com/cosmos/cosmos-sdk/x/authz" +) + +var ( + amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/deployment module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/deployment and + // defined at the application level. 
+ ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +func init() { + RegisterLegacyAminoCodec(amino) + cryptocodec.RegisterCrypto(amino) + amino.Seal() +} + +// RegisterLegacyAminoCodec register concrete types on codec +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgCreateDeployment{}, ModuleName+"/"+MsgTypeCreateDeployment, nil) + cdc.RegisterConcrete(&MsgUpdateDeployment{}, ModuleName+"/"+MsgTypeUpdateDeployment, nil) + cdc.RegisterConcrete(&MsgDepositDeployment{}, ModuleName+"/"+MsgTypeDepositDeployment, nil) + cdc.RegisterConcrete(&MsgCloseDeployment{}, ModuleName+"/"+MsgTypeCloseDeployment, nil) + cdc.RegisterConcrete(&MsgCloseGroup{}, ModuleName+"/"+MsgTypeCloseGroup, nil) + cdc.RegisterConcrete(&MsgPauseGroup{}, ModuleName+"/"+MsgTypePauseGroup, nil) + cdc.RegisterConcrete(&MsgStartGroup{}, ModuleName+"/"+MsgTypeStartGroup, nil) +} + +// RegisterInterfaces registers the x/deployment interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreateDeployment{}, + &MsgUpdateDeployment{}, + &MsgDepositDeployment{}, + &MsgCloseDeployment{}, + &MsgCloseGroup{}, + &MsgPauseGroup{}, + &MsgStartGroup{}, + ) + registry.RegisterImplementations( + (*authz.Authorization)(nil), + &DepositDeploymentAuthorization{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/deployment/v1beta4/deployment.pb.go b/go/node/deployment/v1beta4/deployment.pb.go new file mode 100644 index 00000000..fafcc760 --- /dev/null +++ b/go/node/deployment/v1beta4/deployment.pb.go @@ -0,0 +1,960 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/deployment/v1beta4/deployment.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of deployment +type Deployment_State int32 + +const ( + // Prefix should start with 0 in enum. So declaring dummy state + DeploymentStateInvalid Deployment_State = 0 + // DeploymentActive denotes state for deployment active + DeploymentActive Deployment_State = 1 + // DeploymentClosed denotes state for deployment closed + DeploymentClosed Deployment_State = 2 +) + +var Deployment_State_name = map[int32]string{ + 0: "invalid", + 1: "active", + 2: "closed", +} + +var Deployment_State_value = map[string]int32{ + "invalid": 0, + "active": 1, + "closed": 2, +} + +func (x Deployment_State) String() string { + return proto.EnumName(Deployment_State_name, int32(x)) +} + +func (Deployment_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_51e4b47f720c56d8, []int{1, 0} +} + +// DeploymentID stores owner and sequence number +type DeploymentID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` +} + +func (m *DeploymentID) Reset() { *m = DeploymentID{} } +func (*DeploymentID) ProtoMessage() {} +func (*DeploymentID) Descriptor() ([]byte, []int) { + return fileDescriptor_51e4b47f720c56d8, []int{0} +} +func (m 
*DeploymentID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeploymentID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeploymentID) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeploymentID.Merge(m, src) +} +func (m *DeploymentID) XXX_Size() int { + return m.Size() +} +func (m *DeploymentID) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentID.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentID proto.InternalMessageInfo + +func (m *DeploymentID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *DeploymentID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +// Deployment stores deploymentID, state and version details +type Deployment struct { + DeploymentID DeploymentID `protobuf:"bytes,1,opt,name=deployment_id,json=deploymentId,proto3" json:"id" yaml:"id"` + State Deployment_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta4.Deployment_State" json:"state" yaml:"state"` + Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (m *Deployment) Reset() { *m = Deployment{} } +func (m *Deployment) String() string { return proto.CompactTextString(m) } +func (*Deployment) ProtoMessage() {} +func (*Deployment) Descriptor() ([]byte, []int) { + return fileDescriptor_51e4b47f720c56d8, []int{1} +} +func (m *Deployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Deployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Deployment.Marshal(b, m, deterministic) + } else { + b = 
b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Deployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_Deployment.Merge(m, src) +} +func (m *Deployment) XXX_Size() int { + return m.Size() +} +func (m *Deployment) XXX_DiscardUnknown() { + xxx_messageInfo_Deployment.DiscardUnknown(m) +} + +var xxx_messageInfo_Deployment proto.InternalMessageInfo + +func (m *Deployment) GetDeploymentID() DeploymentID { + if m != nil { + return m.DeploymentID + } + return DeploymentID{} +} + +func (m *Deployment) GetState() Deployment_State { + if m != nil { + return m.State + } + return DeploymentStateInvalid +} + +func (m *Deployment) GetVersion() []byte { + if m != nil { + return m.Version + } + return nil +} + +func (m *Deployment) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +// DeploymentFilters defines filters used to filter deployments +type DeploymentFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + State string `protobuf:"bytes,3,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *DeploymentFilters) Reset() { *m = DeploymentFilters{} } +func (m *DeploymentFilters) String() string { return proto.CompactTextString(m) } +func (*DeploymentFilters) ProtoMessage() {} +func (*DeploymentFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_51e4b47f720c56d8, []int{2} +} +func (m *DeploymentFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DeploymentFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_DeploymentFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *DeploymentFilters) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_DeploymentFilters.Merge(m, src) +} +func (m *DeploymentFilters) XXX_Size() int { + return m.Size() +} +func (m *DeploymentFilters) XXX_DiscardUnknown() { + xxx_messageInfo_DeploymentFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_DeploymentFilters proto.InternalMessageInfo + +func (m *DeploymentFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *DeploymentFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *DeploymentFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func init() { + proto.RegisterEnum("akash.deployment.v1beta4.Deployment_State", Deployment_State_name, Deployment_State_value) + proto.RegisterType((*DeploymentID)(nil), "akash.deployment.v1beta4.DeploymentID") + proto.RegisterType((*Deployment)(nil), "akash.deployment.v1beta4.Deployment") + proto.RegisterType((*DeploymentFilters)(nil), "akash.deployment.v1beta4.DeploymentFilters") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/deployment.proto", fileDescriptor_51e4b47f720c56d8) +} + +var fileDescriptor_51e4b47f720c56d8 = []byte{ + // 508 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x93, 0xb1, 0x6f, 0xd3, 0x4e, + 0x14, 0xc7, 0x7d, 0x89, 0xd3, 0xfe, 0x72, 0xcd, 0xaf, 0x0a, 0x56, 0x85, 0x8c, 0xa5, 0xfa, 0x2c, + 0x0f, 0x34, 0x20, 0x61, 0x8b, 0x16, 0x09, 0x29, 0x12, 0x43, 0x4d, 0x84, 0x94, 0xd5, 0x1d, 0x90, + 0x60, 0xa8, 0x2e, 0xb9, 0x53, 0x7a, 0xaa, 0xe3, 0x4b, 0xed, 0x23, 0x55, 0x19, 0x98, 0x51, 0x27, + 0x46, 0x96, 0x4a, 0x95, 0xf8, 0x07, 0x58, 0xf9, 0x0f, 0x3a, 0x76, 0x64, 0x3a, 0x21, 0x67, 0x41, + 0x19, 0xf3, 0x17, 0x20, 0xdf, 0xa5, 0x38, 0x20, 0x40, 0x4c, 0x6c, 0x7e, 0x9f, 0xfb, 0x3e, 0xbf, + 0xef, 0xbb, 0x77, 0x0f, 0xde, 0xc3, 0xc7, 0x38, 0x3f, 0x0a, 0x09, 0x9d, 0x24, 0xfc, 0x6c, 0x4c, + 0x53, 0x11, 0x4e, 0x1f, 0x0e, 0xa8, 0xc0, 0x8f, 0x56, 0x50, 0x30, 0xc9, 0xb8, 0xe0, 
0x96, 0xad, + 0xa4, 0xc1, 0x0a, 0x5f, 0x4a, 0x9d, 0xad, 0x11, 0x1f, 0x71, 0x25, 0x0a, 0xcb, 0x2f, 0xad, 0xf7, + 0xdf, 0xc0, 0x56, 0xef, 0xbb, 0xb6, 0xdf, 0xb3, 0x42, 0xd8, 0xe0, 0xa7, 0x29, 0xcd, 0x6c, 0xe0, + 0x81, 0x4e, 0x33, 0xba, 0x33, 0x97, 0x48, 0x83, 0x85, 0x44, 0xad, 0x33, 0x3c, 0x4e, 0xba, 0xbe, + 0x0a, 0xfd, 0x58, 0x63, 0x6b, 0x0f, 0x9a, 0x24, 0xa7, 0x27, 0x76, 0xcd, 0x03, 0x1d, 0x33, 0x42, + 0x85, 0x44, 0x66, 0xef, 0x80, 0x9e, 0xcc, 0x25, 0x52, 0x7c, 0x21, 0xd1, 0x86, 0x4e, 0x2b, 0x23, + 0x3f, 0x56, 0xb0, 0xfb, 0xdf, 0xfb, 0x4b, 0x64, 0x7c, 0xbd, 0x44, 0x86, 0xff, 0xa9, 0x0e, 0x61, + 0x65, 0xc0, 0x12, 0xf0, 0xff, 0xca, 0xfa, 0x21, 0x23, 0xca, 0xc6, 0xc6, 0xee, 0xdd, 0xe0, 0x77, + 0x6d, 0x05, 0xab, 0xee, 0xa3, 0x9d, 0x2b, 0x89, 0x8c, 0x42, 0xa2, 0x1f, 0x7a, 0x9a, 0x4b, 0x54, + 0x63, 0x64, 0x21, 0x51, 0x53, 0x1b, 0x61, 0xc4, 0x8f, 0x5b, 0xd5, 0x9f, 0xfa, 0xc4, 0x7a, 0x09, + 0x1b, 0xb9, 0xc0, 0x82, 0xaa, 0x26, 0x36, 0x77, 0xef, 0xff, 0x4d, 0xb5, 0xe0, 0xa0, 0xcc, 0xd0, + 0x17, 0xa4, 0x92, 0xab, 0x0b, 0x52, 0xa1, 0x1f, 0x6b, 0x6c, 0x3d, 0x86, 0xeb, 0x53, 0x9a, 0xe5, + 0x8c, 0xa7, 0x76, 0xdd, 0x03, 0x9d, 0x56, 0xb4, 0x3d, 0x97, 0xe8, 0x06, 0x2d, 0x24, 0xda, 0xd4, + 0x49, 0x4b, 0xe0, 0xc7, 0x37, 0x47, 0xd6, 0x36, 0x84, 0xc3, 0x8c, 0x62, 0x41, 0xc9, 0x21, 0x16, + 0xb6, 0xe9, 0x81, 0x4e, 0x3d, 0x6e, 0x2e, 0xc9, 0xbe, 0xf0, 0x5f, 0xc3, 0x86, 0xb2, 0x60, 0xed, + 0xc0, 0x75, 0x96, 0x4e, 0x71, 0xc2, 0x48, 0xdb, 0x70, 0x9c, 0xf3, 0x0b, 0xef, 0x76, 0xe5, 0x52, + 0x29, 0xfa, 0xfa, 0xd4, 0xf2, 0xe0, 0x1a, 0x1e, 0x0a, 0x36, 0xa5, 0x6d, 0xe0, 0x6c, 0x9d, 0x5f, + 0x78, 0xed, 0x4a, 0xb7, 0xaf, 0x78, 0xa9, 0x18, 0x26, 0x3c, 0xa7, 0xa4, 0x5d, 0xfb, 0x59, 0xf1, + 0x54, 0x71, 0xc7, 0x7c, 0xfb, 0xc1, 0x35, 0xba, 0xa6, 0x9a, 0xdd, 0x47, 0x00, 0x6f, 0x55, 0x82, + 0x67, 0x2c, 0x11, 0x34, 0xcb, 0xff, 0xcd, 0x0b, 0x2a, 0xab, 0xe8, 0x91, 0xd5, 0xab, 0x2a, 0x7f, + 0x1a, 0x83, 0xb6, 0x1c, 0x3d, 0xbf, 0x2a, 0x5c, 0x70, 0x5d, 0xb8, 0xe0, 0x4b, 0xe1, 0x82, 0x77, + 0x33, 0xd7, 0xb8, 0x9e, 
0xb9, 0xc6, 0xe7, 0x99, 0x6b, 0xbc, 0x78, 0x32, 0x62, 0xe2, 0xe8, 0xd5, + 0x20, 0x18, 0xf2, 0x71, 0xa8, 0xc6, 0xff, 0x20, 0xa5, 0xe2, 0x94, 0x67, 0xc7, 0xcb, 0x08, 0x4f, + 0x58, 0x38, 0xe2, 0x61, 0xca, 0x09, 0xfd, 0xc5, 0x22, 0x0e, 0xd6, 0xd4, 0x3a, 0xed, 0x7d, 0x0b, + 0x00, 0x00, 0xff, 0xff, 0xdf, 0x81, 0x89, 0xdc, 0xab, 0x03, 0x00, 0x00, +} + +func (m *DeploymentID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.DSeq != 0 { + i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Deployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Deployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Deployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintDeployment(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintDeployment(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + } + if m.State != 0 { + i = encodeVarintDeployment(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := 
m.DeploymentID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeployment(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *DeploymentFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DeploymentFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DeploymentFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintDeployment(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x1a + } + if m.DSeq != 0 { + i = encodeVarintDeployment(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintDeployment(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintDeployment(dAtA []byte, offset int, v uint64) int { + offset -= sovDeployment(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *DeploymentID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovDeployment(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovDeployment(uint64(m.DSeq)) + } + return n +} + +func (m *Deployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.DeploymentID.Size() + n += 1 + l + sovDeployment(uint64(l)) + if m.State != 0 { + n += 1 + sovDeployment(uint64(m.State)) + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovDeployment(uint64(l)) + } + if m.CreatedAt != 0 { + n += 1 + 
sovDeployment(uint64(m.CreatedAt)) + } + return n +} + +func (m *DeploymentFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovDeployment(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovDeployment(uint64(m.DSeq)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovDeployment(uint64(l)) + } + return n +} + +func sovDeployment(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDeployment(x uint64) (n int) { + return sovDeployment(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *DeploymentID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return 
fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDeployment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeployment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Deployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Deployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Deployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DeploymentID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.DeploymentID.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Deployment_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Version == nil { + m.Version = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDeployment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeployment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DeploymentFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DeploymentFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DeploymentFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + 
intStringLen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeployment + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDeployment + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDeployment + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeployment(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeployment + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDeployment(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeployment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := 
int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeployment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeployment + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDeployment + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDeployment + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDeployment + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDeployment = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDeployment = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDeployment = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/deployment_validation_test.go b/go/node/deployment/v1beta4/deployment_validation_test.go new file mode 100644 index 00000000..b263cf55 --- /dev/null +++ b/go/node/deployment/v1beta4/deployment_validation_test.go @@ -0,0 +1,202 @@ +package v1beta4_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + testutil "github.com/akash-network/akash-api/go/node/client/testutil/v1beta3" + types "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + akashtypes "github.com/akash-network/akash-api/go/node/types/resources/v1" + tutil "github.com/akash-network/akash-api/go/testutil" +) + +const ( + regexInvalidUnitBoundaries = `^.*invalid unit 
count|CPU|GPU|memory|storage \(\d+ > 0 > \d+ fails\)$` +) + +func TestZeroValueGroupSpec(t *testing.T) { + did := testutil.DeploymentID(t) + + dgroup := testutil.DeploymentGroup(t, did, uint32(6)) + gspec := dgroup.GroupSpec + + t.Run("assert nominal test success", func(t *testing.T) { + err := gspec.ValidateBasic() + require.NoError(t, err) + }) +} + +func TestZeroValueGroupSpecs(t *testing.T) { + did := testutil.DeploymentID(t) + dgroups := testutil.DeploymentGroups(t, did, uint32(6)) + gspecs := make([]types.GroupSpec, 0) + for _, d := range dgroups { + gspecs = append(gspecs, d.GroupSpec) + } + + t.Run("assert nominal test success", func(t *testing.T) { + err := types.ValidateDeploymentGroups(gspecs) + require.NoError(t, err) + }) + + gspecZeroed := make([]types.GroupSpec, len(gspecs)) + gspecZeroed = append(gspecZeroed, gspecs...) + t.Run("assert error for zero value bid duration", func(t *testing.T) { + err := types.ValidateDeploymentGroups(gspecZeroed) + require.Error(t, err) + }) +} + +func TestEmptyGroupSpecIsInvalid(t *testing.T) { + err := types.ValidateDeploymentGroups(make([]types.GroupSpec, 0)) + require.Equal(t, types.ErrInvalidGroups, err) +} + +func validSimpleGroupSpec() types.GroupSpec { + resources := make(types.ResourceUnits, 1) + resources[0] = types.ResourceUnit{ + Resources: akashtypes.Resources{ + ID: 1, + CPU: &akashtypes.CPU{ + Units: akashtypes.ResourceValue{ + Val: sdk.NewInt(10), + }, + Attributes: nil, + }, + GPU: &akashtypes.GPU{ + Units: akashtypes.ResourceValue{ + Val: sdk.NewInt(0), + }, + Attributes: nil, + }, + Memory: &akashtypes.Memory{ + Quantity: akashtypes.ResourceValue{ + Val: sdk.NewIntFromUint64(types.GetValidationConfig().Unit.Min.Memory), + }, + Attributes: nil, + }, + Storage: akashtypes.Volumes{ + akashtypes.Storage{ + Quantity: akashtypes.ResourceValue{ + Val: sdk.NewIntFromUint64(types.GetValidationConfig().Unit.Min.Storage), + }, + Attributes: nil, + }, + }, + Endpoints: akashtypes.Endpoints{}, + }, + Count: 1, + 
Price: sdk.NewInt64DecCoin(tutil.CoinDenom, 1), + } + return types.GroupSpec{ + Name: "testGroup", + Requirements: akashtypes.PlacementRequirements{}, + Resources: resources, + } +} + +func validSimpleGroupSpecs() []types.GroupSpec { + result := make([]types.GroupSpec, 1) + result[0] = validSimpleGroupSpec() + + return result +} + +func TestSimpleGroupSpecIsValid(t *testing.T) { + groups := validSimpleGroupSpecs() + err := types.ValidateDeploymentGroups(groups) + require.NoError(t, err) +} + +func TestDuplicateSimpleGroupSpecIsInvalid(t *testing.T) { + groups := validSimpleGroupSpecs() + groupsDuplicate := make([]types.GroupSpec, 2) + groupsDuplicate[0] = groups[0] + groupsDuplicate[1] = groups[0] + err := types.ValidateDeploymentGroups(groupsDuplicate) + require.Error(t, err) // TODO - specific error + require.Regexp(t, "^.*duplicate.*$", err) +} + +func TestGroupWithZeroCount(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Count = 0 + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithZeroCPU(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].CPU.Units.Val = sdk.NewInt(0) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithZeroMemory(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Memory.Quantity.Val = sdk.NewInt(0) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithZeroStorage(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Storage[0].Quantity.Val = sdk.NewInt(0) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, regexInvalidUnitBoundaries, err) +} + +func TestGroupWithNilCPU(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].CPU = nil + err := group.ValidateBasic() + require.Error(t, err) + 
require.Regexp(t, "^.*invalid unit CPU.*$", err) +} + +func TestGroupWithNilGPU(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].GPU = nil + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid unit GPU.*$", err) +} + +func TestGroupWithNilMemory(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Memory = nil + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid unit memory.*$", err) +} + +func TestGroupWithNilStorage(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Storage = nil + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid unit storage.*$", err) +} + +func TestGroupWithInvalidPrice(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Price = sdk.DecCoin{} + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid price object.*$", err) +} + +func TestGroupWithNegativePrice(t *testing.T) { + group := validSimpleGroupSpec() + group.Resources[0].Price.Amount = sdk.NewDec(-1) + err := group.ValidateBasic() + require.Error(t, err) + require.Regexp(t, "^.*invalid price object.*$", err) +} diff --git a/go/node/deployment/v1beta4/deploymentmsg.pb.go b/go/node/deployment/v1beta4/deploymentmsg.pb.go new file mode 100644 index 00000000..e9b57862 --- /dev/null +++ b/go/node/deployment/v1beta4/deploymentmsg.pb.go @@ -0,0 +1,1722 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/deploymentmsg.proto + +package v1beta4 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCreateDeployment defines an SDK message for creating deployment +type MsgCreateDeployment struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Groups []GroupSpec `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` + Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` + Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` + // Depositor pays for the deposit + Depositor string `protobuf:"bytes,5,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` +} + +func (m *MsgCreateDeployment) Reset() { *m = MsgCreateDeployment{} } +func (m *MsgCreateDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgCreateDeployment) ProtoMessage() {} +func (*MsgCreateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{0} +} +func (m *MsgCreateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateDeployment.Merge(m, src) +} +func (m *MsgCreateDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateDeployment) XXX_DiscardUnknown() { + 
xxx_messageInfo_MsgCreateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateDeployment proto.InternalMessageInfo + +func (m *MsgCreateDeployment) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +func (m *MsgCreateDeployment) GetGroups() []GroupSpec { + if m != nil { + return m.Groups + } + return nil +} + +func (m *MsgCreateDeployment) GetVersion() []byte { + if m != nil { + return m.Version + } + return nil +} + +func (m *MsgCreateDeployment) GetDeposit() types.Coin { + if m != nil { + return m.Deposit + } + return types.Coin{} +} + +func (m *MsgCreateDeployment) GetDepositor() string { + if m != nil { + return m.Depositor + } + return "" +} + +// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. +type MsgCreateDeploymentResponse struct { +} + +func (m *MsgCreateDeploymentResponse) Reset() { *m = MsgCreateDeploymentResponse{} } +func (m *MsgCreateDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateDeploymentResponse) ProtoMessage() {} +func (*MsgCreateDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{1} +} +func (m *MsgCreateDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateDeploymentResponse.Merge(m, src) +} +func (m *MsgCreateDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateDeploymentResponse.DiscardUnknown(m) +} + +var 
xxx_messageInfo_MsgCreateDeploymentResponse proto.InternalMessageInfo + +// MsgDepositDeployment deposits more funds into the deposit account +type MsgDepositDeployment struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Amount types.Coin `protobuf:"bytes,2,opt,name=amount,proto3" json:"amount" yaml:"amount"` + // Depositor pays for the deposit + Depositor string `protobuf:"bytes,3,opt,name=depositor,proto3" json:"depositor" yaml:"depositor"` +} + +func (m *MsgDepositDeployment) Reset() { *m = MsgDepositDeployment{} } +func (m *MsgDepositDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgDepositDeployment) ProtoMessage() {} +func (*MsgDepositDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{2} +} +func (m *MsgDepositDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDepositDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDepositDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDepositDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDepositDeployment.Merge(m, src) +} +func (m *MsgDepositDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgDepositDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDepositDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDepositDeployment proto.InternalMessageInfo + +func (m *MsgDepositDeployment) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +func (m *MsgDepositDeployment) GetAmount() types.Coin { + if m != nil { + return m.Amount + } + return types.Coin{} +} + +func (m *MsgDepositDeployment) GetDepositor() string { + if m != nil { + return m.Depositor + } + return "" +} + +// MsgCreateDeploymentResponse defines the 
Msg/CreateDeployment response type. +type MsgDepositDeploymentResponse struct { +} + +func (m *MsgDepositDeploymentResponse) Reset() { *m = MsgDepositDeploymentResponse{} } +func (m *MsgDepositDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDepositDeploymentResponse) ProtoMessage() {} +func (*MsgDepositDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{3} +} +func (m *MsgDepositDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDepositDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDepositDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDepositDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDepositDeploymentResponse.Merge(m, src) +} +func (m *MsgDepositDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDepositDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDepositDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDepositDeploymentResponse proto.InternalMessageInfo + +// MsgUpdateDeployment defines an SDK message for updating deployment +type MsgUpdateDeployment struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` + Version []byte `protobuf:"bytes,3,opt,name=version,proto3" json:"version" yaml:"version"` +} + +func (m *MsgUpdateDeployment) Reset() { *m = MsgUpdateDeployment{} } +func (m *MsgUpdateDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateDeployment) ProtoMessage() {} +func (*MsgUpdateDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{4} +} +func (m *MsgUpdateDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func 
(m *MsgUpdateDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateDeployment.Merge(m, src) +} +func (m *MsgUpdateDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateDeployment proto.InternalMessageInfo + +func (m *MsgUpdateDeployment) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +func (m *MsgUpdateDeployment) GetVersion() []byte { + if m != nil { + return m.Version + } + return nil +} + +// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. +type MsgUpdateDeploymentResponse struct { +} + +func (m *MsgUpdateDeploymentResponse) Reset() { *m = MsgUpdateDeploymentResponse{} } +func (m *MsgUpdateDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateDeploymentResponse) ProtoMessage() {} +func (*MsgUpdateDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{5} +} +func (m *MsgUpdateDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateDeploymentResponse.Merge(m, src) +} +func (m *MsgUpdateDeploymentResponse) 
XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateDeploymentResponse proto.InternalMessageInfo + +// MsgCloseDeployment defines an SDK message for closing deployment +type MsgCloseDeployment struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseDeployment) Reset() { *m = MsgCloseDeployment{} } +func (m *MsgCloseDeployment) String() string { return proto.CompactTextString(m) } +func (*MsgCloseDeployment) ProtoMessage() {} +func (*MsgCloseDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{6} +} +func (m *MsgCloseDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseDeployment.Merge(m, src) +} +func (m *MsgCloseDeployment) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseDeployment proto.InternalMessageInfo + +func (m *MsgCloseDeployment) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. 
+type MsgCloseDeploymentResponse struct { +} + +func (m *MsgCloseDeploymentResponse) Reset() { *m = MsgCloseDeploymentResponse{} } +func (m *MsgCloseDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseDeploymentResponse) ProtoMessage() {} +func (*MsgCloseDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9b10e8e78e405ddf, []int{7} +} +func (m *MsgCloseDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseDeploymentResponse.Merge(m, src) +} +func (m *MsgCloseDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseDeploymentResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCreateDeployment)(nil), "akash.deployment.v1beta4.MsgCreateDeployment") + proto.RegisterType((*MsgCreateDeploymentResponse)(nil), "akash.deployment.v1beta4.MsgCreateDeploymentResponse") + proto.RegisterType((*MsgDepositDeployment)(nil), "akash.deployment.v1beta4.MsgDepositDeployment") + proto.RegisterType((*MsgDepositDeploymentResponse)(nil), "akash.deployment.v1beta4.MsgDepositDeploymentResponse") + proto.RegisterType((*MsgUpdateDeployment)(nil), "akash.deployment.v1beta4.MsgUpdateDeployment") + proto.RegisterType((*MsgUpdateDeploymentResponse)(nil), "akash.deployment.v1beta4.MsgUpdateDeploymentResponse") + proto.RegisterType((*MsgCloseDeployment)(nil), "akash.deployment.v1beta4.MsgCloseDeployment") + 
proto.RegisterType((*MsgCloseDeploymentResponse)(nil), "akash.deployment.v1beta4.MsgCloseDeploymentResponse") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/deploymentmsg.proto", fileDescriptor_9b10e8e78e405ddf) +} + +var fileDescriptor_9b10e8e78e405ddf = []byte{ + // 535 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x54, 0x41, 0x8b, 0xd3, 0x4c, + 0x18, 0x6e, 0xd2, 0xfd, 0xba, 0x74, 0xf6, 0x53, 0x24, 0xee, 0x21, 0xd6, 0x6d, 0xa6, 0x3b, 0x82, + 0x54, 0xd0, 0x84, 0xae, 0x82, 0xb0, 0x20, 0x42, 0xb6, 0x20, 0x7b, 0xd8, 0x4b, 0x64, 0x15, 0xc4, + 0x4b, 0xda, 0x0c, 0xd9, 0x61, 0x9b, 0xbc, 0x31, 0x93, 0xae, 0xec, 0x3f, 0xf0, 0xe8, 0x4f, 0x10, + 0xaf, 0xfe, 0x91, 0x3d, 0xee, 0xd1, 0xd3, 0x20, 0xed, 0x45, 0x7a, 0xec, 0x2f, 0x90, 0x64, 0x26, + 0x8d, 0xd5, 0x16, 0x65, 0x61, 0xbd, 0x65, 0xde, 0xe7, 0x79, 0xde, 0x3c, 0xf3, 0xbc, 0x2f, 0x83, + 0x1e, 0xfa, 0xa7, 0x3e, 0x3f, 0x71, 0x02, 0x9a, 0x8c, 0xe0, 0x3c, 0xa2, 0x71, 0xe6, 0x9c, 0xf5, + 0x06, 0x34, 0xf3, 0x9f, 0xfc, 0x54, 0x8a, 0x78, 0x68, 0x27, 0x29, 0x64, 0x60, 0x98, 0x05, 0xdb, + 0xae, 0x20, 0x5b, 0xb1, 0x5b, 0xdb, 0x21, 0x84, 0x50, 0x90, 0x9c, 0xfc, 0x4b, 0xf2, 0x5b, 0x0f, + 0xfe, 0xa2, 0xbb, 0xa2, 0x76, 0xd7, 0x52, 0xc3, 0x14, 0xc6, 0x09, 0x4f, 0xe8, 0x50, 0x31, 0xad, + 0x21, 0xf0, 0x08, 0xb8, 0x33, 0xf0, 0x39, 0x55, 0xa4, 0x9e, 0x33, 0x04, 0x16, 0x4b, 0x9c, 0x7c, + 0xae, 0xa3, 0xdb, 0x47, 0x3c, 0x3c, 0x48, 0xa9, 0x9f, 0xd1, 0xfe, 0xa2, 0x9f, 0x71, 0x8c, 0x74, + 0x16, 0x98, 0x5a, 0x47, 0xeb, 0x6e, 0xed, 0xdd, 0xb7, 0xd7, 0xdd, 0xc4, 0xae, 0x14, 0x87, 0x7d, + 0xb7, 0x7d, 0x21, 0x70, 0x6d, 0x22, 0xb0, 0x7e, 0xd8, 0x9f, 0x09, 0xac, 0xb3, 0x60, 0x2e, 0x70, + 0xf3, 0xdc, 0x8f, 0x46, 0xfb, 0x84, 0x05, 0xc4, 0xd3, 0x59, 0x60, 0xbc, 0x45, 0x0d, 0xe9, 0xd0, + 0xd4, 0x3b, 0xf5, 0xee, 0xd6, 0xde, 0xbd, 0xf5, 0xad, 0x5f, 0xe4, 0xbc, 0x97, 0x09, 0x1d, 0xba, + 0x38, 0xef, 0x3b, 0x13, 0x58, 0x49, 0xe7, 0x02, 0xdf, 0x90, 0x5d, 0xe5, 0x99, 0x78, 0x0a, 0x30, + 
0x9e, 0xa2, 0xcd, 0x33, 0x9a, 0x72, 0x06, 0xb1, 0x59, 0xef, 0x68, 0xdd, 0xff, 0xdd, 0xf6, 0x4c, + 0xe0, 0xb2, 0x34, 0x17, 0xf8, 0xa6, 0x94, 0xa9, 0x02, 0xf1, 0x4a, 0xc8, 0x78, 0x85, 0x36, 0x03, + 0x9a, 0x00, 0x67, 0x99, 0xb9, 0x51, 0x5c, 0xf9, 0x8e, 0x2d, 0x73, 0xb3, 0xf3, 0xdc, 0x94, 0xa5, + 0x9e, 0x7d, 0x00, 0x2c, 0x76, 0x77, 0x95, 0x9b, 0x52, 0x51, 0xf5, 0x55, 0x05, 0xe2, 0x95, 0x90, + 0xf1, 0x1c, 0x35, 0xd5, 0x27, 0xa4, 0xe6, 0x7f, 0x1d, 0xad, 0xdb, 0x74, 0x77, 0x67, 0x02, 0x57, + 0xc5, 0xb9, 0xc0, 0xb7, 0x96, 0xc4, 0x90, 0x12, 0xaf, 0x82, 0xf7, 0x37, 0xbe, 0x7f, 0xc2, 0x35, + 0xd2, 0x46, 0x77, 0x57, 0xcc, 0xc8, 0xa3, 0x3c, 0x81, 0x98, 0x53, 0xf2, 0x41, 0x47, 0xdb, 0x47, + 0x3c, 0xec, 0x4b, 0xd5, 0xf5, 0x0f, 0xd1, 0x43, 0x0d, 0x3f, 0x82, 0x71, 0x9c, 0x99, 0xfa, 0x9f, + 0xc2, 0x5a, 0x8c, 0x4e, 0x0a, 0xaa, 0xd1, 0xc9, 0x33, 0xf1, 0x14, 0xb0, 0x9c, 0x54, 0xfd, 0xca, + 0x49, 0x59, 0x68, 0x67, 0x55, 0x12, 0x8b, 0xa8, 0xbe, 0x68, 0xc5, 0xba, 0x1f, 0x27, 0xc1, 0x3f, + 0x59, 0xf7, 0xab, 0x2e, 0xe4, 0xd2, 0xdc, 0x7f, 0x35, 0xbb, 0xb8, 0xcc, 0x3b, 0x64, 0xe4, 0x6b, + 0x31, 0x02, 0x7e, 0xfd, 0x57, 0x51, 0x8e, 0x76, 0x50, 0xeb, 0xf7, 0x5f, 0x96, 0x86, 0xdc, 0xd7, + 0x17, 0x13, 0x4b, 0xbb, 0x9c, 0x58, 0xda, 0xb7, 0x89, 0xa5, 0x7d, 0x9c, 0x5a, 0xb5, 0xcb, 0xa9, + 0x55, 0xfb, 0x3a, 0xb5, 0x6a, 0x6f, 0x9e, 0x85, 0x2c, 0x3b, 0x19, 0x0f, 0xec, 0x21, 0x44, 0x4e, + 0x61, 0xe9, 0x51, 0x4c, 0xb3, 0xf7, 0x90, 0x9e, 0xaa, 0x93, 0x9f, 0x30, 0x27, 0x04, 0x27, 0x86, + 0x80, 0xae, 0x78, 0xd5, 0x06, 0x8d, 0xe2, 0xb1, 0x7a, 0xfc, 0x23, 0x00, 0x00, 0xff, 0xff, 0x61, + 0xa3, 0x48, 0x7e, 0x81, 0x05, 0x00, 0x00, +} + +func (m *MsgCreateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*MsgCreateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Depositor) > 0 { + i -= len(m.Depositor) + copy(dAtA[i:], m.Depositor) + i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Depositor))) + i-- + dAtA[i] = 0x2a + } + { + size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Version))) + i-- + dAtA[i] = 0x1a + } + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCreateDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDepositDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDepositDeployment) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDepositDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Depositor) > 0 { + i -= len(m.Depositor) + copy(dAtA[i:], m.Depositor) + i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Depositor))) + i-- + dAtA[i] = 0x1a + } + { + size, err := m.Amount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgDepositDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDepositDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDepositDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Version) > 0 { + i -= len(m.Version) + copy(dAtA[i:], m.Version) + i = encodeVarintDeploymentmsg(dAtA, i, uint64(len(m.Version))) + i-- + 
dAtA[i] = 0x1a + } + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgUpdateDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCloseDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintDeploymentmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var 
l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintDeploymentmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovDeploymentmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCreateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + l = m.Deposit.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + l = len(m.Depositor) + if l > 0 { + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + return n +} + +func (m *MsgCreateDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDepositDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + l = m.Amount.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + l = len(m.Depositor) + if l > 0 { + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + return n +} + +func (m *MsgDepositDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + l = len(m.Version) + if l > 0 { + n += 1 + l + sovDeploymentmsg(uint64(l)) + } + return n +} + +func (m *MsgUpdateDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCloseDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovDeploymentmsg(uint64(l)) + return n +} + +func (m *MsgCloseDeploymentResponse) Size() (n 
int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovDeploymentmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozDeploymentmsg(x uint64) (n int) { + return sovDeploymentmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCreateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << 
shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, GroupSpec{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Version == nil { + m.Version = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Depositor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDepositDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDepositDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDepositDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Amount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Amount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Depositor", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Depositor = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx 
> l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDepositDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDepositDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDepositDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; 
shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = append(m.Version[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Version == nil { + m.Version = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: MsgCloseDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthDeploymentmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthDeploymentmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = 
preIndex + skippy, err := skipDeploymentmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthDeploymentmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDeploymentmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDeploymentmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthDeploymentmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupDeploymentmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthDeploymentmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthDeploymentmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDeploymentmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupDeploymentmsg = fmt.Errorf("proto: unexpected end of group") 
+) diff --git a/go/node/deployment/v1beta4/deposit_deployment_authorization.go b/go/node/deployment/v1beta4/deposit_deployment_authorization.go new file mode 100644 index 00000000..cf68a228 --- /dev/null +++ b/go/node/deployment/v1beta4/deposit_deployment_authorization.go @@ -0,0 +1,45 @@ +package v1beta4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/cosmos/cosmos-sdk/x/authz" +) + +var ( + _ authz.Authorization = &DepositDeploymentAuthorization{} +) + +// NewDepositDeploymentAuthorization creates a new DepositDeploymentAuthorization object. +func NewDepositDeploymentAuthorization(spendLimit sdk.Coin) *DepositDeploymentAuthorization { + return &DepositDeploymentAuthorization{ + SpendLimit: spendLimit, + } +} + +// MsgTypeURL implements Authorization.MsgTypeURL. +func (m DepositDeploymentAuthorization) MsgTypeURL() string { + return sdk.MsgTypeURL(&MsgDepositDeployment{}) +} + +// Accept implements Authorization.Accept. +func (m DepositDeploymentAuthorization) Accept(_ sdk.Context, msg sdk.Msg) (authz.AcceptResponse, error) { + mDepositDeployment, ok := msg.(*MsgDepositDeployment) + if !ok { + return authz.AcceptResponse{}, sdkerrors.ErrInvalidType.Wrap("type mismatch") + } + if m.SpendLimit.IsLT(mDepositDeployment.Amount) { + return authz.AcceptResponse{}, sdkerrors.ErrInsufficientFunds.Wrapf("requested amount is more than spend limit") + } + limitLeft := m.SpendLimit.Sub(mDepositDeployment.Amount) + + return authz.AcceptResponse{Accept: true, Delete: false, Updated: &DepositDeploymentAuthorization{SpendLimit: limitLeft}}, nil +} + +// ValidateBasic implements Authorization.ValidateBasic. 
+func (m DepositDeploymentAuthorization) ValidateBasic() error { + if !m.SpendLimit.IsPositive() { + return sdkerrors.ErrInvalidCoins.Wrapf("spend limit must be positive") + } + return nil +} diff --git a/go/node/deployment/v1beta4/errors.go b/go/node/deployment/v1beta4/errors.go new file mode 100644 index 00000000..a7a30837 --- /dev/null +++ b/go/node/deployment/v1beta4/errors.go @@ -0,0 +1,81 @@ +package v1beta4 + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + errNameDoesNotExist uint32 = iota + 1 + errInvalidRequest + errDeploymentExists + errDeploymentNotFound + errDeploymentClosed + errOwnerAcctMissing + errInvalidGroups + errInvalidDeploymentID + errEmptyVersion + errInvalidVersion + errInternal + errInvalidDeployment + errInvalidGroupID + errGroupNotFound + errGroupClosed + errGroupOpen + errGroupPaused + errGroupNotOpen + errGroupSpec + errInvalidDeposit + errInvalidIDPath + errInvalidParam + errInvalidDeploymentDepositor +) + +var ( + // ErrNameDoesNotExist is the error when name does not exist + ErrNameDoesNotExist = sdkerrors.Register(ModuleName, errNameDoesNotExist, "Name does not exist") + // ErrInvalidRequest is the error for invalid request + ErrInvalidRequest = sdkerrors.Register(ModuleName, errInvalidRequest, "Invalid request") + // ErrDeploymentExists is the error when already deployment exists + ErrDeploymentExists = sdkerrors.Register(ModuleName, errDeploymentExists, "Deployment exists") + // ErrDeploymentNotFound is the error when deployment not found + ErrDeploymentNotFound = sdkerrors.Register(ModuleName, errDeploymentNotFound, "Deployment not found") + // ErrDeploymentClosed is the error when deployment is closed + ErrDeploymentClosed = sdkerrors.Register(ModuleName, errDeploymentClosed, "Deployment closed") + // ErrOwnerAcctMissing is the error for owner account missing + ErrOwnerAcctMissing = sdkerrors.Register(ModuleName, errOwnerAcctMissing, "Owner account missing") + // ErrInvalidGroups is the error 
when groups are empty + ErrInvalidGroups = sdkerrors.Register(ModuleName, errInvalidGroups, "Invalid groups") + // ErrInvalidDeploymentID is the error for invalid deployment id + ErrInvalidDeploymentID = sdkerrors.Register(ModuleName, errInvalidDeploymentID, "Invalid: deployment id") + // ErrEmptyVersion is the error when version is empty + ErrEmptyVersion = sdkerrors.Register(ModuleName, errEmptyVersion, "Invalid: empty version") + // ErrInvalidVersion is the error when version is invalid + ErrInvalidVersion = sdkerrors.Register(ModuleName, errInvalidVersion, "Invalid: deployment version") + // ErrInternal is the error for internal error + ErrInternal = sdkerrors.Register(ModuleName, errInternal, "internal error") + // ErrInvalidDeployment is the error when deployment does not pass validation + ErrInvalidDeployment = sdkerrors.Register(ModuleName, errInvalidDeployment, "Invalid deployment") + // ErrInvalidGroupID is the error for invalid group id + ErrInvalidGroupID = sdkerrors.Register(ModuleName, errInvalidGroupID, "Invalid: group id") + // ErrGroupNotFound is the keeper's error for not finding a group + ErrGroupNotFound = sdkerrors.Register(ModuleName, errGroupNotFound, "Group not found") + // ErrGroupClosed is the error when group is already closed + ErrGroupClosed = sdkerrors.Register(ModuleName, errGroupClosed, "Group already closed") + // ErrGroupOpen is the error when group is open + ErrGroupOpen = sdkerrors.Register(ModuleName, errGroupOpen, "Group open") + // ErrGroupPaused is the error when group is paused + ErrGroupPaused = sdkerrors.Register(ModuleName, errGroupPaused, "Group paused") + // ErrGroupNotOpen indicates the Group state has progressed beyond initial Open. 
+ ErrGroupNotOpen = sdkerrors.Register(ModuleName, errGroupNotOpen, "Group not open") + // ErrGroupSpecInvalid indicates a GroupSpec has invalid configuration + ErrGroupSpecInvalid = sdkerrors.Register(ModuleName, errGroupSpec, "GroupSpec invalid") + + // ErrInvalidDeposit indicates an invalid deposit + ErrInvalidDeposit = sdkerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") + // ErrInvalidIDPath indicates an invalid ID path + ErrInvalidIDPath = sdkerrors.Register(ModuleName, errInvalidIDPath, "ID path invalid") + // ErrInvalidParam indicates an invalid chain parameter + ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") + // ErrInvalidDeploymentDepositor indicates an invalid chain parameter + ErrInvalidDeploymentDepositor = sdkerrors.Register(ModuleName, errInvalidDeploymentDepositor, "invalid deployment depositor") +) diff --git a/go/node/deployment/v1beta4/escrow.go b/go/node/deployment/v1beta4/escrow.go new file mode 100644 index 00000000..59368fd5 --- /dev/null +++ b/go/node/deployment/v1beta4/escrow.go @@ -0,0 +1,25 @@ +package v1beta4 + +import ( + etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta3" +) + +const ( + EscrowScope = "deployment" +) + +func EscrowAccountForDeployment(id DeploymentID) etypes.AccountID { + return etypes.AccountID{ + Scope: EscrowScope, + XID: id.String(), + } +} + +func DeploymentIDFromEscrowAccount(id etypes.AccountID) (DeploymentID, bool) { + if id.Scope != EscrowScope { + return DeploymentID{}, false + } + + did, err := ParseDeploymentID(id.XID) + return did, err == nil +} diff --git a/go/node/deployment/v1beta4/event.go b/go/node/deployment/v1beta4/event.go new file mode 100644 index 00000000..aa103397 --- /dev/null +++ b/go/node/deployment/v1beta4/event.go @@ -0,0 +1,309 @@ +package v1beta4 + +import ( + "encoding/hex" + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/akash-network/akash-api/go/sdkutil" +) + +const ( + 
evActionDeploymentCreated = "deployment-created" + evActionDeploymentUpdated = "deployment-updated" + evActionDeploymentClosed = "deployment-closed" + evActionGroupClosed = "group-closed" + evActionGroupPaused = "group-paused" + evActionGroupStarted = "group-started" + evOwnerKey = "owner" + evDSeqKey = "dseq" + evGSeqKey = "gseq" + evVersionKey = "version" + encodedVersionHexLen = 64 +) + +// EventDeploymentCreated struct +type EventDeploymentCreated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID DeploymentID `json:"id"` + Version []byte `json:"version"` +} + +// NewEventDeploymentCreated initializes creation event. +func NewEventDeploymentCreated(id DeploymentID, version []byte) EventDeploymentCreated { + return EventDeploymentCreated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionDeploymentCreated, + }, + ID: id, + Version: version, + } +} + +// ToSDKEvent method creates new sdk event for EventDeploymentCreated struct +func (ev EventDeploymentCreated) ToSDKEvent() sdk.Event { + version := encodeHex(ev.Version) + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentCreated), + sdk.NewAttribute(evVersionKey, string(version)), + }, DeploymentIDEVAttributes(ev.ID)...)..., + ) +} + +// EventDeploymentUpdated struct +type EventDeploymentUpdated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID DeploymentID `json:"id"` + Version []byte `json:"version"` +} + +// NewEventDeploymentUpdated initializes SDK type +func NewEventDeploymentUpdated(id DeploymentID, version []byte) EventDeploymentUpdated { + return EventDeploymentUpdated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionDeploymentUpdated, + }, + ID: id, + Version: version, + } +} + +// ToSDKEvent method creates new sdk event for EventDeploymentUpdated struct +func (ev EventDeploymentUpdated) 
ToSDKEvent() sdk.Event { + version := encodeHex(ev.Version) + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentUpdated), + sdk.NewAttribute(evVersionKey, string(version)), + }, DeploymentIDEVAttributes(ev.ID)...)..., + ) +} + +// EventDeploymentClosed struct +type EventDeploymentClosed struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID DeploymentID `json:"id"` +} + +func NewEventDeploymentClosed(id DeploymentID) EventDeploymentClosed { + return EventDeploymentClosed{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionDeploymentClosed, + }, + ID: id, + } +} + +// ToSDKEvent method creates new sdk event for EventDeploymentClosed struct +func (ev EventDeploymentClosed) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionDeploymentClosed), + }, DeploymentIDEVAttributes(ev.ID)...)..., + ) +} + +// DeploymentIDEVAttributes returns event attribues for given DeploymentID +func DeploymentIDEVAttributes(id DeploymentID) []sdk.Attribute { + return []sdk.Attribute{ + sdk.NewAttribute(evOwnerKey, id.Owner), + sdk.NewAttribute(evDSeqKey, strconv.FormatUint(id.DSeq, 10)), + } +} + +// ParseEVDeploymentID returns deploymentID details for given event attributes +func ParseEVDeploymentID(attrs []sdk.Attribute) (DeploymentID, error) { + owner, err := sdkutil.GetAccAddress(attrs, evOwnerKey) + if err != nil { + return DeploymentID{}, err + } + dseq, err := sdkutil.GetUint64(attrs, evDSeqKey) + if err != nil { + return DeploymentID{}, err + } + + return DeploymentID{ + Owner: owner.String(), + DSeq: dseq, + }, nil +} + +// ParseEVDeploymentVersion returns the Deployment's SDL sha256 sum +func ParseEVDeploymentVersion(attrs []sdk.Attribute) ([]byte, error) { + 
v, err := sdkutil.GetString(attrs, evVersionKey) + if err != nil { + return nil, err + } + return decodeHex([]byte(v)) +} + +func encodeHex(src []byte) []byte { + dst := make([]byte, hex.EncodedLen(len(src))) + hex.Encode(dst, src) + return dst +} + +func decodeHex(src []byte) ([]byte, error) { + dst := make([]byte, hex.DecodedLen(len(src))) + if _, err := hex.Decode(dst, src); err != nil { + return []byte{}, err + } + return dst, nil +} + +// EventGroupClosed provides SDK event to signal group termination +type EventGroupClosed struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID GroupID `json:"id"` +} + +func NewEventGroupClosed(id GroupID) EventGroupClosed { + return EventGroupClosed{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionGroupClosed, + }, + ID: id, + } +} + +// ToSDKEvent produces the SDK notification for Event +func (ev EventGroupClosed) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupClosed), + }, GroupIDEVAttributes(ev.ID)...)..., + ) +} + +// EventGroupPaused provides SDK event to signal group termination +type EventGroupPaused struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID GroupID `json:"id"` +} + +func NewEventGroupPaused(id GroupID) EventGroupPaused { + return EventGroupPaused{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionGroupPaused, + }, + ID: id, + } +} + +// ToSDKEvent produces the SDK notification for Event +func (ev EventGroupPaused) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupPaused), + }, GroupIDEVAttributes(ev.ID)...)..., + ) +} + +// EventGroupStarted provides SDK event to signal group termination +type EventGroupStarted 
struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID GroupID `json:"id"` +} + +func NewEventGroupStarted(id GroupID) EventGroupStarted { + return EventGroupStarted{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionGroupStarted, + }, + ID: id, + } +} + +// ToSDKEvent produces the SDK notification for Event +func (ev EventGroupStarted) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionGroupStarted), + }, GroupIDEVAttributes(ev.ID)...)..., + ) +} + +// GroupIDEVAttributes returns event attribues for given GroupID +func GroupIDEVAttributes(id GroupID) []sdk.Attribute { + return append(DeploymentIDEVAttributes(id.DeploymentID()), + sdk.NewAttribute(evGSeqKey, strconv.FormatUint(uint64(id.GSeq), 10))) +} + +// ParseEVGroupID returns GroupID details for given event attributes +func ParseEVGroupID(attrs []sdk.Attribute) (GroupID, error) { + did, err := ParseEVDeploymentID(attrs) + if err != nil { + return GroupID{}, err + } + + gseq, err := sdkutil.GetUint64(attrs, evGSeqKey) + if err != nil { + return GroupID{}, err + } + + return GroupID{ + Owner: did.Owner, + DSeq: did.DSeq, + GSeq: uint32(gseq), + }, nil +} + +// ParseEvent parses event and returns details of event and error if occurred +func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { + if ev.Type != sdkutil.EventTypeMessage { + return nil, sdkutil.ErrUnknownType + } + if ev.Module != ModuleName { + return nil, sdkutil.ErrUnknownModule + } + switch ev.Action { + case evActionDeploymentCreated: + did, err := ParseEVDeploymentID(ev.Attributes) + if err != nil { + return nil, err + } + ver, err := ParseEVDeploymentVersion(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventDeploymentCreated(did, ver), nil + case evActionDeploymentUpdated: + did, err := ParseEVDeploymentID(ev.Attributes) + if 
err != nil { + return nil, err + } + ver, err := ParseEVDeploymentVersion(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventDeploymentUpdated(did, ver), nil + case evActionDeploymentClosed: + did, err := ParseEVDeploymentID(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventDeploymentClosed(did), nil + case evActionGroupClosed: + gid, err := ParseEVGroupID(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventGroupClosed(gid), nil + default: + return nil, sdkutil.ErrUnknownAction + } +} diff --git a/go/node/deployment/v1beta4/events_test.go b/go/node/deployment/v1beta4/events_test.go new file mode 100644 index 00000000..9c09d071 --- /dev/null +++ b/go/node/deployment/v1beta4/events_test.go @@ -0,0 +1,336 @@ +package v1beta4 + +import ( + "crypto/sha256" + "errors" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/akash-network/akash-api/go/sdkutil" +) + +var ( + keyAcc, _ = sdk.AccAddressFromBech32("akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr") + errWildcard = errors.New("wildcard string error can't be matched") + tmpSum = sha256.Sum256([]byte(keyAcc)) + deploymentVersion = encodeHex(tmpSum[:]) +) + +type testEventParsing struct { + msg sdkutil.Event + expErr error +} + +func (tep testEventParsing) testMessageType() func(t *testing.T) { + _, err := ParseEvent(tep.msg) + return func(t *testing.T) { + // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. 
+ if errors.Is(tep.expErr, errWildcard) { + require.Error(t, err) + } else { + require.Equal(t, tep.expErr, err) + } + } +} + +var TEPS = []testEventParsing{ + { + msg: sdkutil.Event{ + Type: "nil", + }, + expErr: sdkutil.ErrUnknownType, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + }, + expErr: sdkutil.ErrUnknownModule, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + }, + expErr: sdkutil.ErrUnknownAction, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: "nil", + }, + expErr: sdkutil.ErrUnknownModule, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: "nil", + }, + expErr: sdkutil.ErrUnknownAction, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evVersionKey, + Value: string(deploymentVersion), + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "abc", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "5", + }, + }, + }, + expErr: errWildcard, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: 
evActionDeploymentUpdated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evVersionKey, + Value: string(deploymentVersion), + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentUpdated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "5", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionGroupClosed, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "1", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentClosed, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "5", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentClosed, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "abc", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionGroupClosed, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evDSeqKey, + Value: "5", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionGroupClosed, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + { + Key: evGSeqKey, + Value: "1", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: 
sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentUpdated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "neh", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evVersionKey, + Value: string(deploymentVersion), + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionDeploymentUpdated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: keyAcc.String(), + }, + }, + }, + expErr: errWildcard, + }, +} + +func TestEventParsing(t *testing.T) { + for i, test := range TEPS { + t.Run(strconv.Itoa(i), test.testMessageType()) + } +} + +func TestVersionEncoding(t *testing.T) { + versionHex := encodeHex(tmpSum[:]) + assert.Len(t, versionHex, encodedVersionHexLen) + decodedVersion, err := decodeHex(versionHex) + assert.NoError(t, err) + assert.Equal(t, tmpSum[:], decodedVersion) +} diff --git a/go/node/deployment/v1beta4/genesis.pb.go b/go/node/deployment/v1beta4/genesis.pb.go new file mode 100644 index 00000000..da183f48 --- /dev/null +++ b/go/node/deployment/v1beta4/genesis.pb.go @@ -0,0 +1,630 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/genesis.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisDeployment defines the basic genesis state used by deployment module +type GenesisDeployment struct { + Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` + Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` +} + +func (m *GenesisDeployment) Reset() { *m = GenesisDeployment{} } +func (m *GenesisDeployment) String() string { return proto.CompactTextString(m) } +func (*GenesisDeployment) ProtoMessage() {} +func (*GenesisDeployment) Descriptor() ([]byte, []int) { + return fileDescriptor_a4941a99faf6028f, []int{0} +} +func (m *GenesisDeployment) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisDeployment) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisDeployment.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisDeployment) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisDeployment.Merge(m, src) +} +func (m *GenesisDeployment) XXX_Size() int { + return m.Size() +} +func (m *GenesisDeployment) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisDeployment.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisDeployment proto.InternalMessageInfo + +func (m *GenesisDeployment) GetDeployment() Deployment { + if m != nil { + return m.Deployment + } + return Deployment{} +} + +func (m *GenesisDeployment) GetGroups() []Group { + if m != nil { + return m.Groups + } + return nil +} + +// GenesisState stores slice of genesis deployment instance +type GenesisState struct { + Deployments []GenesisDeployment `protobuf:"bytes,1,rep,name=deployments,proto3" json:"deployments" yaml:"deployments"` + Params Params `protobuf:"bytes,2,opt,name=params,proto3" json:"params" yaml:"params"` +} 
+ +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_a4941a99faf6028f, []int{1} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetDeployments() []GenesisDeployment { + if m != nil { + return m.Deployments + } + return nil +} + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func init() { + proto.RegisterType((*GenesisDeployment)(nil), "akash.deployment.v1beta4.GenesisDeployment") + proto.RegisterType((*GenesisState)(nil), "akash.deployment.v1beta4.GenesisState") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/genesis.proto", fileDescriptor_a4941a99faf6028f) +} + +var fileDescriptor_a4941a99faf6028f = []byte{ + // 358 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x92, 0xbf, 0x4e, 0xc3, 0x30, + 0x10, 0xc6, 0xe3, 0x22, 0x75, 0x70, 0x61, 0x68, 0xc4, 0x10, 0x75, 0x88, 0x2b, 0xab, 0x40, 0x2b, + 0x44, 0x2c, 0x0a, 0x13, 0x12, 0x4b, 0x84, 0xd4, 0x15, 0x85, 0x81, 0x3f, 0x9b, 0x4b, 0xad, 0xb4, + 0x6a, 0x13, 0x47, 0x89, 0x0b, 0xf4, 0x2d, 0x78, 0xac, 0x8e, 0x1d, 
0x19, 0x50, 0x84, 0x9a, 0x8d, + 0xb1, 0x4f, 0x80, 0x6a, 0x5b, 0x38, 0xaa, 0x08, 0x5b, 0x2e, 0xfe, 0x7d, 0xf7, 0xdd, 0x77, 0x3a, + 0x78, 0x4c, 0xa7, 0x34, 0x1b, 0x93, 0x11, 0x4b, 0x66, 0x7c, 0x11, 0xb1, 0x58, 0x90, 0x97, 0xf3, + 0x21, 0x13, 0xf4, 0x92, 0x84, 0x2c, 0x66, 0xd9, 0x24, 0xf3, 0x92, 0x94, 0x0b, 0x6e, 0x3b, 0x92, + 0xf3, 0x0c, 0xe7, 0x69, 0xae, 0x75, 0x18, 0xf2, 0x90, 0x4b, 0x88, 0x6c, 0xbf, 0x14, 0xdf, 0xea, + 0x55, 0xf6, 0x2d, 0xb5, 0x50, 0x68, 0xa7, 0x7a, 0x84, 0x94, 0xcf, 0x13, 0x4d, 0x1d, 0x55, 0x52, + 0x09, 0x4d, 0x69, 0xa4, 0xe7, 0xc4, 0x9f, 0x00, 0x36, 0x07, 0x6a, 0xf2, 0x9b, 0x5f, 0xd4, 0x8e, + 0x20, 0x34, 0x42, 0x07, 0xb4, 0x41, 0xb7, 0xd1, 0xef, 0x78, 0x55, 0x91, 0x3c, 0xa3, 0xf4, 0x4f, + 0x96, 0x39, 0xb2, 0xbe, 0x73, 0x54, 0xd2, 0x6f, 0x72, 0xd4, 0x5c, 0xd0, 0x68, 0x76, 0x85, 0xcd, + 0x3f, 0x1c, 0x94, 0x00, 0xfb, 0x01, 0xd6, 0xe5, 0xe8, 0x99, 0x53, 0x6b, 0xef, 0x75, 0x1b, 0x7d, + 0x54, 0x6d, 0x35, 0xd8, 0x72, 0x3e, 0xd2, 0x2e, 0x5a, 0xb6, 0xc9, 0xd1, 0x81, 0x72, 0x50, 0x35, + 0x0e, 0xf4, 0x03, 0x2e, 0x00, 0xdc, 0xd7, 0xf1, 0xee, 0x04, 0x15, 0xcc, 0x7e, 0x83, 0x0d, 0xd3, + 0x35, 0x73, 0x80, 0xf4, 0x3b, 0xfd, 0xc7, 0x6f, 0x77, 0x37, 0x7e, 0x4f, 0x7b, 0x97, 0xfb, 0x6c, + 0x72, 0x64, 0xef, 0x46, 0xcc, 0x70, 0x50, 0x46, 0xec, 0x47, 0x58, 0x57, 0x9b, 0x77, 0x6a, 0x72, + 0x9f, 0xed, 0x6a, 0xd3, 0x5b, 0xc9, 0x99, 0x94, 0x4a, 0x67, 0x52, 0xaa, 0x1a, 0x07, 0xfa, 0xc1, + 0xbf, 0x5f, 0xae, 0x5d, 0xb0, 0x5a, 0xbb, 0xe0, 0x6b, 0xed, 0x82, 0xf7, 0xc2, 0xb5, 0x56, 0x85, + 0x6b, 0x7d, 0x14, 0xae, 0xf5, 0x74, 0x1d, 0x4e, 0xc4, 0x78, 0x3e, 0xf4, 0x9e, 0x79, 0x44, 0xa4, + 0xdd, 0x59, 0xcc, 0xc4, 0x2b, 0x4f, 0xa7, 0xba, 0xa2, 0xc9, 0x84, 0x84, 0x9c, 0xc4, 0x7c, 0xc4, + 0xfe, 0x38, 0x95, 0x61, 0x5d, 0x1e, 0xc9, 0xc5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x28, 0xa4, + 0xb1, 0x57, 0xf6, 0x02, 0x00, 0x00, +} + +func (m *GenesisDeployment) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisDeployment) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisDeployment) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Deployments) > 0 { + for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ 
+ } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisDeployment) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Deployment.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Deployments) > 0 { + for _, e := range m.Deployments { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisDeployment) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisDeployment: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisDeployment: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return 
ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, Group{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field Deployments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deployments = append(m.Deployments, GenesisDeployment{}) + if err := m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b 
:= dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/group.pb.go b/go/node/deployment/v1beta4/group.pb.go new file mode 100644 index 00000000..2a8dbf5c --- /dev/null +++ b/go/node/deployment/v1beta4/group.pb.go @@ -0,0 +1,505 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/group.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of group +type Group_State int32 + +const ( + // Prefix should start with 0 in enum. So declaring dummy state + GroupStateInvalid Group_State = 0 + // GroupOpen denotes state for group open + GroupOpen Group_State = 1 + // GroupOrdered denotes state for group ordered + GroupPaused Group_State = 2 + // GroupInsufficientFunds denotes state for group insufficient_funds + GroupInsufficientFunds Group_State = 3 + // GroupClosed denotes state for group closed + GroupClosed Group_State = 4 +) + +var Group_State_name = map[int32]string{ + 0: "invalid", + 1: "open", + 2: "paused", + 3: "insufficient_funds", + 4: "closed", +} + +var Group_State_value = map[string]int32{ + "invalid": 0, + "open": 1, + "paused": 2, + "insufficient_funds": 3, + "closed": 4, +} + +func (x Group_State) String() string { + return proto.EnumName(Group_State_name, int32(x)) +} + +func (Group_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_a45c04780ffee23e, []int{0, 0} +} + +// Group stores group id, state and specifications of group +type Group struct { + GroupID GroupID `protobuf:"bytes,1,opt,name=group_id,json=groupId,proto3" json:"id" yaml:"id"` + State Group_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.deployment.v1beta4.Group_State" json:"state" yaml:"state"` + GroupSpec GroupSpec `protobuf:"bytes,3,opt,name=group_spec,json=groupSpec,proto3" json:"spec" yaml:"spec"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (m *Group) Reset() { *m = Group{} } +func (m 
*Group) String() string { return proto.CompactTextString(m) } +func (*Group) ProtoMessage() {} +func (*Group) Descriptor() ([]byte, []int) { + return fileDescriptor_a45c04780ffee23e, []int{0} +} +func (m *Group) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Group) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Group.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Group) XXX_Merge(src proto.Message) { + xxx_messageInfo_Group.Merge(m, src) +} +func (m *Group) XXX_Size() int { + return m.Size() +} +func (m *Group) XXX_DiscardUnknown() { + xxx_messageInfo_Group.DiscardUnknown(m) +} + +var xxx_messageInfo_Group proto.InternalMessageInfo + +func (m *Group) GetGroupID() GroupID { + if m != nil { + return m.GroupID + } + return GroupID{} +} + +func (m *Group) GetState() Group_State { + if m != nil { + return m.State + } + return GroupStateInvalid +} + +func (m *Group) GetGroupSpec() GroupSpec { + if m != nil { + return m.GroupSpec + } + return GroupSpec{} +} + +func (m *Group) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func init() { + proto.RegisterEnum("akash.deployment.v1beta4.Group_State", Group_State_name, Group_State_value) + proto.RegisterType((*Group)(nil), "akash.deployment.v1beta4.Group") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/group.proto", fileDescriptor_a45c04780ffee23e) +} + +var fileDescriptor_a45c04780ffee23e = []byte{ + // 487 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0x3d, 0x8f, 0xd3, 0x30, + 0x1c, 0xc6, 0x93, 0x6b, 0x7a, 0xa5, 0x2e, 0x2f, 0xc5, 0xe2, 0x25, 0xe4, 0x44, 0x12, 0xc2, 0x8b, + 0xba, 0x90, 0x88, 0xc2, 0x74, 0x12, 0x03, 0x05, 0x81, 0x3a, 0x81, 0x7a, 0x12, 0x48, 0x2c, 0xc5, + 0x8d, 0xdd, 0x9c, 0x75, 0xad, 0x6d, 
0x35, 0xce, 0xa1, 0x5b, 0x99, 0x50, 0x27, 0xbe, 0x40, 0x25, + 0x24, 0xbe, 0x04, 0x1f, 0xe1, 0xc6, 0x1b, 0x99, 0x22, 0xd4, 0x2e, 0xa8, 0x63, 0x3f, 0x01, 0xb2, + 0x1d, 0xc4, 0x0d, 0x70, 0xb7, 0xc5, 0xcf, 0xf3, 0xfb, 0x3f, 0x79, 0x6c, 0xfd, 0xc1, 0x3d, 0x74, + 0x80, 0xf2, 0xfd, 0x04, 0x13, 0x31, 0xe1, 0x47, 0x53, 0xc2, 0x64, 0x72, 0xf8, 0x68, 0x44, 0x24, + 0x7a, 0x92, 0x64, 0x33, 0x5e, 0x88, 0x58, 0xcc, 0xb8, 0xe4, 0xd0, 0xd5, 0x54, 0xfc, 0x97, 0x8a, + 0x2b, 0xca, 0xbb, 0x96, 0xf1, 0x8c, 0x6b, 0x28, 0x51, 0x5f, 0x86, 0xf7, 0x1e, 0x9c, 0x9d, 0x4a, + 0x71, 0xc5, 0x75, 0xce, 0xe6, 0x72, 0x41, 0x52, 0x43, 0x46, 0x9f, 0x1c, 0x50, 0x7f, 0xa5, 0x34, + 0xf8, 0x01, 0x5c, 0xd0, 0xe6, 0x90, 0x62, 0xd7, 0x0e, 0xed, 0x4e, 0xab, 0x7b, 0x27, 0xfe, 0x5f, + 0xbd, 0x58, 0x8f, 0xf4, 0x5f, 0xf4, 0xa2, 0xe3, 0x32, 0xb0, 0x96, 0x65, 0xd0, 0xa8, 0x84, 0x75, + 0x19, 0x6c, 0x51, 0xbc, 0x29, 0x83, 0xe6, 0x11, 0x9a, 0x4e, 0x76, 0x23, 0x8a, 0xa3, 0x41, 0x43, + 0xc7, 0xf6, 0x31, 0x7c, 0x0b, 0xea, 0xb9, 0x44, 0x92, 0xb8, 0x5b, 0xa1, 0xdd, 0xb9, 0xdc, 0xbd, + 0x7f, 0x4e, 0x7c, 0xbc, 0xa7, 0xe0, 0xde, 0xad, 0x75, 0x19, 0x98, 0xb9, 0x4d, 0x19, 0x5c, 0x34, + 0xb1, 0xfa, 0x18, 0x0d, 0x8c, 0x0c, 0x87, 0x00, 0x98, 0xe6, 0xea, 0x5e, 0x6e, 0x4d, 0x77, 0xbf, + 0x7b, 0x4e, 0xf8, 0x9e, 0x20, 0x69, 0x6f, 0x47, 0xb5, 0x5f, 0x97, 0x81, 0xa3, 0x06, 0x37, 0x65, + 0xd0, 0xaa, 0xd2, 0x05, 0x49, 0xa3, 0x41, 0x33, 0xfb, 0xc3, 0xc1, 0xdb, 0x00, 0xa4, 0x33, 0x82, + 0x24, 0xc1, 0x43, 0x24, 0x5d, 0x27, 0xb4, 0x3b, 0xb5, 0x41, 0xb3, 0x52, 0x9e, 0xc9, 0xe8, 0xbb, + 0x0d, 0xea, 0xba, 0x2b, 0x8c, 0x40, 0x83, 0xb2, 0x43, 0x34, 0xa1, 0xb8, 0x6d, 0x79, 0xd7, 0xe7, + 0x8b, 0xf0, 0xaa, 0xf9, 0x99, 0x32, 0xfb, 0xc6, 0x80, 0x37, 0x81, 0xc3, 0x05, 0x61, 0x6d, 0xdb, + 0xbb, 0x34, 0x5f, 0x84, 0x4d, 0x0d, 0xbc, 0x16, 0x84, 0xc1, 0x1d, 0xb0, 0x2d, 0x50, 0x91, 0x13, + 0xdc, 0xde, 0xf2, 0xae, 0xcc, 0x17, 0x61, 0x4b, 0x5b, 0x6f, 0xb4, 0x04, 0xbb, 0x00, 0x52, 0x96, + 0x17, 0xe3, 0x31, 0x4d, 0x29, 0x61, 0x72, 0x38, 0x2e, 0x18, 0xce, 0xdb, 
0x35, 0xcf, 0x9b, 0x2f, + 0xc2, 0x1b, 0xe6, 0xf1, 0x4f, 0xd9, 0x2f, 0x95, 0xab, 0x02, 0xd3, 0x09, 0x57, 0x81, 0xce, 0xa9, + 0xc0, 0xe7, 0x5a, 0xf2, 0x9c, 0xcf, 0xdf, 0x7c, 0x6b, 0xd7, 0xf9, 0xf5, 0x35, 0xb0, 0x7a, 0xef, + 0x8e, 0x97, 0xbe, 0x7d, 0xb2, 0xf4, 0xed, 0x9f, 0x4b, 0xdf, 0xfe, 0xb2, 0xf2, 0xad, 0x93, 0x95, + 0x6f, 0xfd, 0x58, 0xf9, 0xd6, 0xfb, 0xa7, 0x19, 0x95, 0xfb, 0xc5, 0x28, 0x4e, 0xf9, 0x34, 0xd1, + 0x0f, 0xfa, 0x90, 0x11, 0xf9, 0x91, 0xcf, 0x0e, 0xaa, 0x13, 0x12, 0x34, 0xc9, 0x78, 0xc2, 0x38, + 0x26, 0xff, 0xd8, 0xb6, 0xd1, 0xb6, 0x5e, 0xb2, 0xc7, 0xbf, 0x03, 0x00, 0x00, 0xff, 0xff, 0x77, + 0x1c, 0x32, 0xc6, 0x0e, 0x03, 0x00, 0x00, +} + +func (m *Group) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Group) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Group) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintGroup(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.GroupSpec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintGroup(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.GroupID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroup(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGroup(dAtA []byte, offset int, v uint64) int { + offset -= sovGroup(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Group) Size() (n int) { + if m == nil { + 
return 0 + } + var l int + _ = l + l = m.GroupID.Size() + n += 1 + l + sovGroup(uint64(l)) + if m.State != 0 { + n += 1 + sovGroup(uint64(m.State)) + } + l = m.GroupSpec.Size() + n += 1 + l + sovGroup(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovGroup(uint64(m.CreatedAt)) + } + return n +} + +func sovGroup(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroup(x uint64) (n int) { + return sovGroup(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Group) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Group: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Group: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GroupID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Group_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GroupSpec", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroup + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroup + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.GroupSpec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroup + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGroup(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroup + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroup(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroup + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroup + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroup + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroup + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroup = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroup = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroup = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/group_validation.go b/go/node/deployment/v1beta4/group_validation.go new file mode 100644 index 00000000..61c1d668 --- /dev/null +++ b/go/node/deployment/v1beta4/group_validation.go @@ -0,0 +1,35 @@ +package v1beta4 + +import ( + "fmt" +) + +// ValidateDeploymentGroups does validation for all deployment groups +func ValidateDeploymentGroups(gspecs []GroupSpec) error { + if len(gspecs) == 0 { + return ErrInvalidGroups + } + + names := make(map[string]int, len(gspecs)) // Used as set + denom := "" + for idx, group := range gspecs { + // all must be same denomination + if idx == 0 { + denom = group.Price().Denom + } else if group.Price().Denom != denom { + return fmt.Errorf("inconsistent denomination: %v != %v", denom, group.Price().Denom) + } + + if err := group.ValidateBasic(); err != nil { + 
return err + } + + if _, exists := names[group.GetName()]; exists { + return fmt.Errorf("duplicate deployment group name %q", group.GetName()) + } + + names[group.GetName()] = 0 // Value stored does not matter + } + + return nil +} diff --git a/go/node/deployment/v1beta4/groupid.pb.go b/go/node/deployment/v1beta4/groupid.pb.go new file mode 100644 index 00000000..a0b836c6 --- /dev/null +++ b/go/node/deployment/v1beta4/groupid.pb.go @@ -0,0 +1,395 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/groupid.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GroupID stores owner, deployment sequence number and group sequence number +type GroupID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` +} + +func (m *GroupID) Reset() { *m = GroupID{} } +func (*GroupID) ProtoMessage() {} +func (*GroupID) Descriptor() ([]byte, []int) { + return fileDescriptor_97119ab13846b441, []int{0} +} +func (m *GroupID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GroupID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GroupID) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupID.Merge(m, src) +} +func (m *GroupID) XXX_Size() int { + return m.Size() +} +func (m *GroupID) XXX_DiscardUnknown() { + xxx_messageInfo_GroupID.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupID proto.InternalMessageInfo + +func (m *GroupID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *GroupID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *GroupID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func init() { + proto.RegisterType((*GroupID)(nil), "akash.deployment.v1beta4.GroupID") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/groupid.proto", fileDescriptor_97119ab13846b441) +} + +var fileDescriptor_97119ab13846b441 = []byte{ + // 281 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 
0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, + 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xc8, 0x4c, 0xd1, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x92, 0x00, 0xab, 0xd3, 0x43, 0xa8, 0xd3, 0x83, 0xaa, 0x93, 0x12, 0x49, 0xcf, + 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0x95, 0xd6, 0x31, 0x72, 0xb1, 0xbb, 0x83, + 0x4c, 0xf0, 0x74, 0x11, 0xd2, 0xe7, 0x62, 0xcd, 0x2f, 0xcf, 0x4b, 0x2d, 0x92, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x74, 0x92, 0x7c, 0x75, 0x4f, 0x1e, 0x22, 0xf0, 0xe9, 0x9e, 0x3c, 0x4f, 0x65, 0x62, + 0x6e, 0x8e, 0x95, 0x12, 0x98, 0xab, 0x14, 0x04, 0x11, 0x16, 0x32, 0xe6, 0x62, 0x49, 0x29, 0x4e, + 0x2d, 0x94, 0x60, 0x52, 0x60, 0xd4, 0x60, 0x71, 0x92, 0x7f, 0x74, 0x4f, 0x9e, 0xc5, 0x25, 0x38, + 0xb5, 0xf0, 0xd5, 0x3d, 0x79, 0xb0, 0xf8, 0xa7, 0x7b, 0xf2, 0xdc, 0x10, 0x6d, 0x20, 0x9e, 0x52, + 0x10, 0x58, 0x10, 0xa4, 0x29, 0x1d, 0xa4, 0x89, 0x59, 0x81, 0x51, 0x83, 0x17, 0xa2, 0xc9, 0x1d, + 0xaa, 0x29, 0x1d, 0x45, 0x53, 0x3a, 0x44, 0x13, 0x88, 0xb2, 0xe2, 0x98, 0xb1, 0x40, 0x9e, 0xe1, + 0xc5, 0x02, 0x79, 0x06, 0xa7, 0xf0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, 0x3c, 0x92, 0x63, 0x7c, 0xf0, + 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, 0x6e, 0x3c, 0x96, 0x63, 0x88, + 0xb2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x07, 0x87, 0x82, 0x6e, + 0x5e, 0x6a, 0x49, 0x79, 0x7e, 0x51, 0x36, 0x94, 0x97, 0x58, 0x90, 0xa9, 0x9f, 0x9e, 0xaf, 0x9f, + 0x97, 0x9f, 0x92, 0x8a, 0x25, 0x1c, 0x93, 0xd8, 0xc0, 0x01, 0x62, 0x0c, 0x08, 0x00, 0x00, 0xff, + 0xff, 0x24, 0x00, 0xce, 0x72, 0x6a, 0x01, 0x00, 0x00, +} + +func (m *GroupID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupID) MarshalToSizedBuffer(dAtA []byte) 
(int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.GSeq != 0 { + i = encodeVarintGroupid(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintGroupid(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintGroupid(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGroupid(dAtA []byte, offset int, v uint64) int { + offset -= sovGroupid(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GroupID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovGroupid(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovGroupid(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovGroupid(uint64(m.GSeq)) + } + return n +} + +func sovGroupid(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroupid(x uint64) (n int) { + return sovGroupid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GroupID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowGroupid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGroupid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGroupid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipGroupid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroupid(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch 
wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroupid + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroupid + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroupid + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroupid = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroupid = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroupid = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/groupmsg.pb.go b/go/node/deployment/v1beta4/groupmsg.pb.go new file mode 100644 index 00000000..5570e7d5 --- /dev/null +++ b/go/node/deployment/v1beta4/groupmsg.pb.go @@ -0,0 +1,1034 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/groupmsg.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. 
+// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// MsgCloseGroup defines SDK message to close a single Group within a Deployment. +type MsgCloseGroup struct { + ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseGroup) Reset() { *m = MsgCloseGroup{} } +func (m *MsgCloseGroup) String() string { return proto.CompactTextString(m) } +func (*MsgCloseGroup) ProtoMessage() {} +func (*MsgCloseGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{0} +} +func (m *MsgCloseGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseGroup.Merge(m, src) +} +func (m *MsgCloseGroup) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseGroup proto.InternalMessageInfo + +func (m *MsgCloseGroup) GetID() GroupID { + if m != nil { + return m.ID + } + return GroupID{} +} + +// MsgCloseGroupResponse defines the Msg/CloseGroup response type. 
+type MsgCloseGroupResponse struct { +} + +func (m *MsgCloseGroupResponse) Reset() { *m = MsgCloseGroupResponse{} } +func (m *MsgCloseGroupResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseGroupResponse) ProtoMessage() {} +func (*MsgCloseGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{1} +} +func (m *MsgCloseGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseGroupResponse.Merge(m, src) +} +func (m *MsgCloseGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseGroupResponse proto.InternalMessageInfo + +// MsgPauseGroup defines SDK message to pause a single Group within a Deployment. 
+type MsgPauseGroup struct { + ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgPauseGroup) Reset() { *m = MsgPauseGroup{} } +func (m *MsgPauseGroup) String() string { return proto.CompactTextString(m) } +func (*MsgPauseGroup) ProtoMessage() {} +func (*MsgPauseGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{2} +} +func (m *MsgPauseGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgPauseGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgPauseGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgPauseGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgPauseGroup.Merge(m, src) +} +func (m *MsgPauseGroup) XXX_Size() int { + return m.Size() +} +func (m *MsgPauseGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MsgPauseGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgPauseGroup proto.InternalMessageInfo + +func (m *MsgPauseGroup) GetID() GroupID { + if m != nil { + return m.ID + } + return GroupID{} +} + +// MsgPauseGroupResponse defines the Msg/PauseGroup response type. 
+type MsgPauseGroupResponse struct { +} + +func (m *MsgPauseGroupResponse) Reset() { *m = MsgPauseGroupResponse{} } +func (m *MsgPauseGroupResponse) String() string { return proto.CompactTextString(m) } +func (*MsgPauseGroupResponse) ProtoMessage() {} +func (*MsgPauseGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{3} +} +func (m *MsgPauseGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgPauseGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgPauseGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgPauseGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgPauseGroupResponse.Merge(m, src) +} +func (m *MsgPauseGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgPauseGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgPauseGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgPauseGroupResponse proto.InternalMessageInfo + +// MsgStartGroup defines SDK message to start a single Group within a Deployment. 
+type MsgStartGroup struct { + ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id" yaml:"id"` +} + +func (m *MsgStartGroup) Reset() { *m = MsgStartGroup{} } +func (m *MsgStartGroup) String() string { return proto.CompactTextString(m) } +func (*MsgStartGroup) ProtoMessage() {} +func (*MsgStartGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{4} +} +func (m *MsgStartGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgStartGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgStartGroup.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgStartGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgStartGroup.Merge(m, src) +} +func (m *MsgStartGroup) XXX_Size() int { + return m.Size() +} +func (m *MsgStartGroup) XXX_DiscardUnknown() { + xxx_messageInfo_MsgStartGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgStartGroup proto.InternalMessageInfo + +func (m *MsgStartGroup) GetID() GroupID { + if m != nil { + return m.ID + } + return GroupID{} +} + +// MsgStartGroupResponse defines the Msg/StartGroup response type. 
+type MsgStartGroupResponse struct { +} + +func (m *MsgStartGroupResponse) Reset() { *m = MsgStartGroupResponse{} } +func (m *MsgStartGroupResponse) String() string { return proto.CompactTextString(m) } +func (*MsgStartGroupResponse) ProtoMessage() {} +func (*MsgStartGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ec2e655b03e10552, []int{5} +} +func (m *MsgStartGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgStartGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgStartGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgStartGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgStartGroupResponse.Merge(m, src) +} +func (m *MsgStartGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgStartGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgStartGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgStartGroupResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*MsgCloseGroup)(nil), "akash.deployment.v1beta4.MsgCloseGroup") + proto.RegisterType((*MsgCloseGroupResponse)(nil), "akash.deployment.v1beta4.MsgCloseGroupResponse") + proto.RegisterType((*MsgPauseGroup)(nil), "akash.deployment.v1beta4.MsgPauseGroup") + proto.RegisterType((*MsgPauseGroupResponse)(nil), "akash.deployment.v1beta4.MsgPauseGroupResponse") + proto.RegisterType((*MsgStartGroup)(nil), "akash.deployment.v1beta4.MsgStartGroup") + proto.RegisterType((*MsgStartGroupResponse)(nil), "akash.deployment.v1beta4.MsgStartGroupResponse") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/groupmsg.proto", fileDescriptor_ec2e655b03e10552) +} + +var fileDescriptor_ec2e655b03e10552 = []byte{ + // 286 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4f, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, + 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x4f, 0x2f, 0xca, 0x2f, 0x2d, 0xc8, 0x2d, 0x4e, 0xd7, 0x2b, 0x28, + 0xca, 0x2f, 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd4, 0x43, 0x28, 0xd4, 0x83, 0x2a, 0x94, 0x12, 0x49, + 0xcf, 0x4f, 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xd4, 0xf0, 0x1b, 0x9c, 0x99, + 0x02, 0x51, 0xa7, 0x94, 0xce, 0xc5, 0xeb, 0x5b, 0x9c, 0xee, 0x9c, 0x93, 0x5f, 0x9c, 0xea, 0x0e, + 0x92, 0x10, 0x0a, 0xe0, 0x62, 0xca, 0x4c, 0x91, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x36, 0x52, 0xd4, + 0xc3, 0x65, 0xab, 0x1e, 0x58, 0xb1, 0xa7, 0x8b, 0x93, 0xec, 0x89, 0x7b, 0xf2, 0x0c, 0x8f, 0xee, + 0xc9, 0x33, 0x79, 0xba, 0xbc, 0xba, 0x27, 0xcf, 0x94, 0x99, 0xf2, 0xe9, 0x9e, 0x3c, 0x67, 0x65, + 0x62, 0x6e, 0x8e, 0x95, 0x52, 0x66, 0x8a, 0x52, 0x10, 0x53, 0x66, 0x8a, 0x15, 0xcb, 0x8b, 0x05, + 0xf2, 0x0c, 0x4a, 0xe2, 0x5c, 0xa2, 0x28, 0x16, 0x05, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, + 0x42, 0x5d, 0x10, 0x90, 0x58, 0x4a, 0x1f, 0x17, 0x20, 0x2c, 0x42, 0x73, 0x41, 0x70, 0x49, 0x62, + 0x51, 0x09, 0x3d, 0x5c, 0x80, 0xb0, 0x08, 0xe6, 0x02, 0xa7, 0xf0, 0x13, 0x8f, 0xe4, 0x18, 0x2f, + 0x3c, 0x92, 0x63, 0x7c, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x0b, 0x8f, 0xe5, 0x18, + 0x6e, 0x3c, 0x96, 0x63, 0x88, 0xb2, 0x4d, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x07, 0x3b, 0x44, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, 0x4b, 0x2c, 0xc8, + 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0xc5, 0x12, 0xd9, 0x49, 0x6c, 0xe0, 0x58, 0x36, + 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xa1, 0xc9, 0x81, 0x55, 0x68, 0x02, 0x00, 0x00, +} + +func (m *MsgCloseGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseGroup) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgPauseGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgPauseGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgPauseGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgPauseGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgPauseGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgPauseGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgStartGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgStartGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgStartGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupmsg(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgStartGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgStartGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgStartGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintGroupmsg(dAtA []byte, offset int, v uint64) int { + offset -= sovGroupmsg(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *MsgCloseGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovGroupmsg(uint64(l)) + return n +} + +func (m *MsgCloseGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgPauseGroup) Size() (n int) { + if m == nil { 
+ return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovGroupmsg(uint64(l)) + return n +} + +func (m *MsgPauseGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgStartGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovGroupmsg(uint64(l)) + return n +} + +func (m *MsgStartGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func sovGroupmsg(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroupmsg(x uint64) (n int) { + return sovGroupmsg(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *MsgCloseGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != 
nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgPauseGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgPauseGroup: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: MsgPauseGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgPauseGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgPauseGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgPauseGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgStartGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgStartGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgStartGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupmsg + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupmsg + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgStartGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := 
iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgStartGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgStartGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGroupmsg(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupmsg + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroupmsg(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupmsg + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroupmsg + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + 
return 0, ErrUnexpectedEndOfGroupGroupmsg + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroupmsg + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroupmsg = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroupmsg = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroupmsg = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/groupspec.go b/go/node/deployment/v1beta4/groupspec.go new file mode 100644 index 00000000..e0f63585 --- /dev/null +++ b/go/node/deployment/v1beta4/groupspec.go @@ -0,0 +1,196 @@ +package v1beta4 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + + atypes "github.com/akash-network/akash-api/go/node/audit/v1beta4" + types "github.com/akash-network/akash-api/go/node/types/resources/v1" +) + +type ResourceGroup interface { + GetName() string + GetResourceUnits() ResourceUnits +} + +var _ ResourceGroup = (*GroupSpec)(nil) + +type GroupSpecs []*GroupSpec + +func (gspecs GroupSpecs) Dup() GroupSpecs { + res := make(GroupSpecs, 0, len(gspecs)) + + for _, gspec := range gspecs { + gs := gspec.Dup() + res = append(res, &gs) + } + return res +} + +func (g GroupSpec) Dup() GroupSpec { + res := GroupSpec{ + Name: g.Name, + Requirements: g.Requirements.Dup(), + Resources: g.Resources, + } + + return res +} + +// ValidateBasic asserts non-zero values +func (g GroupSpec) ValidateBasic() error { + return g.validate() +} + +// GetResourceUnits method returns resources list in group +func (g GroupSpec) GetResourceUnits() ResourceUnits { + resources := make(ResourceUnits, 0, len(g.Resources)) + + for _, r := range g.Resources { + resources = append(resources, r) + } + + return resources +} + +// GetName method returns group name +func (g GroupSpec) GetName() string { + return g.Name +} + +// 
Price method returns price of group +func (g GroupSpec) Price() sdk.DecCoin { + var price sdk.DecCoin + for idx, resource := range g.Resources { + if idx == 0 { + price = resource.FullPrice() + continue + } + price = price.Add(resource.FullPrice()) + } + return price +} + +// MatchResourcesRequirements check if resources attributes match provider's capabilities +func (g GroupSpec) MatchResourcesRequirements(pattr types.Attributes) bool { + for _, rgroup := range g.GetResourceUnits() { + pgroup := pattr.GetCapabilitiesGroup("storage") + for _, storage := range rgroup.Storage { + if len(storage.Attributes) == 0 { + continue + } + + if !storage.Attributes.IN(pgroup) { + return false + } + } + if gpu := rgroup.GPU; gpu.Units.Val.Uint64() > 0 { + attr := gpu.Attributes + if len(attr) == 0 { + continue + } + + pgroup = pattr.GetCapabilitiesMap("gpu") + + if !gpu.Attributes.AnyIN(pgroup) { + return false + } + } + } + + return true +} + +// MatchRequirements method compares provided attributes with specific group attributes. +// Argument provider is a bit cumbersome. 
First element is attributes from x/provider store
+// in case tenant does not need signed attributes at all
+// rest of elements (if any) are attributes signed by various auditors
+func (g GroupSpec) MatchRequirements(provider []atypes.Provider) bool {
+	if (len(g.Requirements.SignedBy.AnyOf) != 0) || (len(g.Requirements.SignedBy.AllOf) != 0) {
+		// we cannot match if there is no signed attributes
+		if len(provider) < 2 {
+			return false
+		}
+
+		existingRequirements := make(attributesMatching)
+
+		for _, existing := range provider[1:] {
+			existingRequirements[existing.Auditor] = existing.Attributes
+		}
+
+		if len(g.Requirements.SignedBy.AllOf) != 0 {
+			for _, validator := range g.Requirements.SignedBy.AllOf {
+				// if at least one signature does not exist or no match on attributes - requirements cannot match
+				if existingAttr, exists := existingRequirements[validator]; !exists ||
+					!types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) {
+					return false
+				}
+			}
+		}
+
+		if len(g.Requirements.SignedBy.AnyOf) != 0 {
+			for _, validator := range g.Requirements.SignedBy.AnyOf {
+				if existingAttr, exists := existingRequirements[validator]; exists &&
+					types.AttributesSubsetOf(g.Requirements.Attributes, existingAttr) {
+					return true
+				}
+			}
+
+			return false
+		}
+
+		return true
+	}
+
+	return types.AttributesSubsetOf(g.Requirements.Attributes, provider[0].Attributes)
+}
+
+// validate does validation for provided deployment group
+func (g *GroupSpec) validate() error {
+	if g.Name == "" {
+		return fmt.Errorf("empty group spec name denomination")
+	}
+
+	if err := g.GetResourceUnits().Validate(); err != nil {
+		return err
+	}
+
+	if err := g.validatePricing(); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// validatePricing checks every resource's price and enforces that all
+// resources in the group use the same price denomination.
+func (g *GroupSpec) validatePricing() error {
+	var price sdk.DecCoin
+
+	mem := sdk.NewInt(0)
+
+	for idx, resource := range g.Resources {
+		if err := resource.validatePricing(); err != nil {
+			return fmt.Errorf("group %v: %w", g.GetName(), err)
+		}
+
+		// all must be same denomination
+		if idx == 0 {
+			price = resource.FullPrice()
+		} else {
+			rprice := resource.FullPrice()
+			if rprice.Denom != price.Denom {
+				return fmt.Errorf("multi-denomination group: (%v == %v fails)", rprice.Denom, price.Denom)
+			}
+			price = price.Add(rprice)
+		}
+
+		memCount := sdk.NewInt(0)
+		if u := resource.Memory; u != nil {
+			memCount = memCount.Add(sdk.NewIntFromUint64(u.Quantity.Value())) // sdk.Int is immutable; Add returns a new value — result must be reassigned
+		}
+
+		mem = mem.Add(memCount.Mul(sdk.NewIntFromUint64(uint64(resource.Count)))) // NOTE(review): mem is accumulated but never checked — dead code or missing validation? confirm
+	}
+
+	return nil
+}
diff --git a/go/node/deployment/v1beta4/groupspec.pb.go b/go/node/deployment/v1beta4/groupspec.pb.go
new file mode 100644
index 00000000..52f99c84
--- /dev/null
+++ b/go/node/deployment/v1beta4/groupspec.pb.go
@@ -0,0 +1,427 @@
+// Code generated by protoc-gen-gogo. DO NOT EDIT.
+// source: akash/deployment/v1beta4/groupspec.proto
+
+package v1beta4
+
+import (
+	fmt "fmt"
+	v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1"
+	_ "github.com/gogo/protobuf/gogoproto"
+	proto "github.com/gogo/protobuf/proto"
+	io "io"
+	math "math"
+	math_bits "math/bits"
+)
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = fmt.Errorf
+var _ = math.Inf
+
+// This is a compile-time assertion to ensure that this generated file
+// is compatible with the proto package it is being compiled against.
+// A compilation error at this line likely means your copy of the
+// proto package needs to be updated.
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GroupSpec stores group specifications +type GroupSpec struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Requirements v1.PlacementRequirements `protobuf:"bytes,2,opt,name=requirements,proto3" json:"requirements" yaml:"requirements"` + Resources ResourceUnits `protobuf:"bytes,3,rep,name=resources,proto3,castrepeated=ResourceUnits" json:"resources" yaml:"resources"` +} + +func (m *GroupSpec) Reset() { *m = GroupSpec{} } +func (m *GroupSpec) String() string { return proto.CompactTextString(m) } +func (*GroupSpec) ProtoMessage() {} +func (*GroupSpec) Descriptor() ([]byte, []int) { + return fileDescriptor_bd2049f4b23a57e8, []int{0} +} +func (m *GroupSpec) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GroupSpec) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GroupSpec.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GroupSpec) XXX_Merge(src proto.Message) { + xxx_messageInfo_GroupSpec.Merge(m, src) +} +func (m *GroupSpec) XXX_Size() int { + return m.Size() +} +func (m *GroupSpec) XXX_DiscardUnknown() { + xxx_messageInfo_GroupSpec.DiscardUnknown(m) +} + +var xxx_messageInfo_GroupSpec proto.InternalMessageInfo + +func init() { + proto.RegisterType((*GroupSpec)(nil), "akash.deployment.v1beta4.GroupSpec") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/groupspec.proto", fileDescriptor_bd2049f4b23a57e8) +} + +var fileDescriptor_bd2049f4b23a57e8 = []byte{ + // 380 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0x3f, 0x4b, 0xfb, 0x40, + 0x18, 0xc7, 0x93, 0xf6, 0xc7, 0x0f, 0x9b, 0x2a, 0x48, 0x14, 0x0c, 0x1d, 0x72, 0x25, 0x83, 0x04, + 0x8a, 0x77, 0xb4, 0x8a, 0x43, 0xc1, 
0x25, 0x8b, 0xab, 0x44, 0x44, 0x70, 0xbb, 0xa4, 0x47, 0x1a, + 0xda, 0xe4, 0xe2, 0xdd, 0xa5, 0x52, 0x70, 0x70, 0x74, 0xf4, 0x25, 0x74, 0xf6, 0x6d, 0xb8, 0x74, + 0xec, 0xe8, 0x14, 0xa5, 0x5d, 0xa4, 0x63, 0x5f, 0x81, 0xe4, 0x4f, 0x49, 0x0b, 0x76, 0xbb, 0xef, + 0xc3, 0xe7, 0x79, 0xbe, 0xdf, 0xe7, 0x1e, 0xc5, 0xc4, 0x03, 0xcc, 0xfb, 0xa8, 0x47, 0xa2, 0x21, + 0x1d, 0x07, 0x24, 0x14, 0x68, 0xd4, 0x76, 0x88, 0xc0, 0x17, 0xc8, 0x63, 0x34, 0x8e, 0x78, 0x44, + 0x5c, 0x18, 0x31, 0x2a, 0xa8, 0xaa, 0x65, 0x24, 0x2c, 0x49, 0x58, 0x90, 0x8d, 0x63, 0x8f, 0x7a, + 0x34, 0x83, 0x50, 0xfa, 0xca, 0xf9, 0x46, 0x31, 0xd9, 0xc1, 0x9c, 0x20, 0x2c, 0x04, 0xf3, 0x9d, + 0x58, 0x10, 0x8e, 0x46, 0xed, 0x52, 0x15, 0x64, 0x6b, 0x67, 0x06, 0x46, 0x38, 0x8d, 0x99, 0x4b, + 0xe2, 0xd0, 0x17, 0x39, 0x6c, 0x7c, 0x54, 0x94, 0xda, 0x75, 0x1a, 0xed, 0x36, 0x22, 0xae, 0xda, + 0x52, 0xfe, 0x85, 0x38, 0x20, 0x9a, 0xdc, 0x94, 0xcd, 0x9a, 0x75, 0xb2, 0x4c, 0x40, 0xa6, 0x57, + 0x09, 0xa8, 0x8f, 0x71, 0x30, 0xec, 0x1a, 0xa9, 0x32, 0xec, 0xac, 0xa8, 0xbe, 0xc8, 0xca, 0x3e, + 0x23, 0x8f, 0xb1, 0xcf, 0x48, 0xea, 0xc2, 0xb5, 0x4a, 0x53, 0x36, 0xeb, 0x1d, 0x04, 0xf3, 0xcd, + 0xd2, 0xa4, 0xb0, 0x4c, 0x0a, 0x47, 0x6d, 0x78, 0x33, 0xc4, 0x6e, 0xc6, 0xda, 0x1b, 0x6d, 0x56, + 0x6b, 0x9a, 0x00, 0x69, 0x99, 0x80, 0xad, 0x61, 0xab, 0x04, 0x1c, 0xe5, 0x96, 0x9b, 0x55, 0xc3, + 0xde, 0x82, 0xd4, 0x67, 0xa5, 0xb6, 0xde, 0x89, 0x6b, 0xd5, 0x66, 0xd5, 0xac, 0x77, 0x4e, 0xe1, + 0xae, 0x8f, 0x85, 0x76, 0x81, 0xde, 0x85, 0xbe, 0xb0, 0x2e, 0x0b, 0xd7, 0x72, 0xc0, 0x2a, 0x01, + 0x87, 0x6b, 0xcb, 0xa2, 0x64, 0xbc, 0x7f, 0x81, 0x83, 0xcd, 0x36, 0x6e, 0x97, 0x7c, 0x77, 0xef, + 0x75, 0x02, 0xa4, 0x9f, 0x09, 0x90, 0xac, 0xfb, 0xe9, 0x5c, 0x97, 0x67, 0x73, 0x5d, 0xfe, 0x9e, + 0xeb, 0xf2, 0xdb, 0x42, 0x97, 0x66, 0x0b, 0x5d, 0xfa, 0x5c, 0xe8, 0xd2, 0xc3, 0x95, 0xe7, 0x8b, + 0x7e, 0xec, 0x40, 0x97, 0x06, 0x28, 0x0b, 0x76, 0x16, 0x12, 0xf1, 0x44, 0xd9, 0xa0, 0x50, 0x38, + 0xf2, 0x91, 0x47, 0x51, 0x48, 0x7b, 0xe4, 0x8f, 0x8b, 0x39, 0xff, 0xb3, 
0x2b, 0x9d, 0xff, 0x06, + 0x00, 0x00, 0xff, 0xff, 0x44, 0x7d, 0x8c, 0x34, 0x58, 0x02, 0x00, 0x00, +} + +func (m *GroupSpec) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GroupSpec) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GroupSpec) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Resources) > 0 { + for iNdEx := len(m.Resources) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Resources[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupspec(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Requirements.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGroupspec(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGroupspec(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintGroupspec(dAtA []byte, offset int, v uint64) int { + offset -= sovGroupspec(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GroupSpec) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGroupspec(uint64(l)) + } + l = m.Requirements.Size() + n += 1 + l + sovGroupspec(uint64(l)) + if len(m.Resources) > 0 { + for _, e := range m.Resources { + l = e.Size() + n += 1 + l + sovGroupspec(uint64(l)) + } + } + return n +} + +func sovGroupspec(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGroupspec(x uint64) (n int) { + return sovGroupspec(uint64((x << 
1) ^ uint64((int64(x) >> 63)))) +} +func (m *GroupSpec) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GroupSpec: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GroupSpec: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGroupspec + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGroupspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Requirements", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupspec + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Requirements.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGroupspec + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGroupspec + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGroupspec + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Resources = append(m.Resources, ResourceUnit{}) + if err := m.Resources[len(m.Resources)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGroupspec(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGroupspec + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGroupspec(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupspec + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupspec + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGroupspec + } + 
if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGroupspec + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGroupspec + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGroupspec + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGroupspec = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGroupspec = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGroupspec = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/id.go b/go/node/deployment/v1beta4/id.go new file mode 100644 index 00000000..39b86b3c --- /dev/null +++ b/go/node/deployment/v1beta4/id.go @@ -0,0 +1,103 @@ +package v1beta4 + +import ( + "fmt" + "strconv" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +// Equals method compares specific deployment with provided deployment +func (id DeploymentID) Equals(other DeploymentID) bool { + return id.Owner == other.Owner && id.DSeq == other.DSeq +} + +// Validate method for DeploymentID and returns nil +func (id DeploymentID) Validate() error { + _, err := sdk.AccAddressFromBech32(id.Owner) + switch { + case err != nil: + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "DeploymentID: Invalid Owner Address") + case id.DSeq == 0: + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "DeploymentID: Invalid Deployment Sequence") + } + return nil +} + +// String method for deployment IDs +func (id DeploymentID) String() string { + return fmt.Sprintf("%s/%d", id.Owner, id.DSeq) +} + +func (id DeploymentID) GetOwnerAddress() (sdk.Address, error) { + 
return sdk.AccAddressFromBech32(id.Owner) +} + +func ParseDeploymentID(val string) (DeploymentID, error) { + parts := strings.Split(val, "/") + return ParseDeploymentPath(parts) +} + +// ParseDeploymentPath returns DeploymentID details with provided queries, and return +// error if occurred due to wrong query +func ParseDeploymentPath(parts []string) (DeploymentID, error) { + if len(parts) != 2 { + return DeploymentID{}, ErrInvalidIDPath + } + + owner, err := sdk.AccAddressFromBech32(parts[0]) + if err != nil { + return DeploymentID{}, err + } + + dseq, err := strconv.ParseUint(parts[1], 10, 64) + if err != nil { + return DeploymentID{}, err + } + + return DeploymentID{ + Owner: owner.String(), + DSeq: dseq, + }, nil +} + +// MakeGroupID returns GroupID instance with provided deployment details +// and group sequence number. +func MakeGroupID(id DeploymentID, gseq uint32) GroupID { + return GroupID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: gseq, + } +} + +// DeploymentID method returns DeploymentID details with specific group details +func (id GroupID) DeploymentID() DeploymentID { + return DeploymentID{ + Owner: id.Owner, + DSeq: id.DSeq, + } +} + +// Equals method compares specific group with provided group +func (id GroupID) Equals(other GroupID) bool { + return id.DeploymentID().Equals(other.DeploymentID()) && id.GSeq == other.GSeq +} + +// Validate method for GroupID and returns nil +func (id GroupID) Validate() error { + if err := id.DeploymentID().Validate(); err != nil { + return sdkerrors.Wrap(err, "GroupID: Invalid DeploymentID") + } + if id.GSeq == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "GroupID: Invalid Group Sequence") + } + return nil +} + +// String method provides human readable representation of GroupID. 
+func (id GroupID) String() string { + return fmt.Sprintf("%s/%d", id.DeploymentID(), id.GSeq) +} diff --git a/go/node/deployment/v1beta4/key.go b/go/node/deployment/v1beta4/key.go new file mode 100644 index 00000000..23c29325 --- /dev/null +++ b/go/node/deployment/v1beta4/key.go @@ -0,0 +1,20 @@ +package v1beta4 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "deployment" + + // StoreKey is the store key string for deployment + StoreKey = ModuleName + + // RouterKey is the message route for deployment + RouterKey = ModuleName +) + +func DeploymentPrefix() []byte { + return []byte{0x01} +} + +func GroupPrefix() []byte { + return []byte{0x02} +} diff --git a/go/node/deployment/v1beta4/migrate/v1beta3.go b/go/node/deployment/v1beta4/migrate/v1beta3.go new file mode 100644 index 00000000..5c04ff61 --- /dev/null +++ b/go/node/deployment/v1beta4/migrate/v1beta3.go @@ -0,0 +1,51 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + amigrate "github.com/akash-network/akash-api/go/node/types/attributes/v1/migrate" + rmigrate "github.com/akash-network/akash-api/go/node/types/resources/v1/migrate" +) + +func ResourceUnitFromV1Beta3(id uint32, from v1beta3.ResourceUnit) v1beta4.ResourceUnit { + return v1beta4.ResourceUnit{ + Resources: rmigrate.ResourcesFromV1Beta3(id, from.Resources), + Count: from.Count, + Price: from.Price, + } +} + +func ResourcesUnitsFromV1Beta3(from []v1beta3.ResourceUnit) v1beta4.ResourceUnits { + res := make(v1beta4.ResourceUnits, 0, len(from)) + + for i, oval := range from { + res = append(res, ResourceUnitFromV1Beta3(uint32(i+1), oval)) + } + + return res +} + +func GroupIDFromV1Beta3(from v1beta3.GroupID) v1beta4.GroupID { + return v1beta4.GroupID{ + Owner: from.Owner, + DSeq: from.DSeq, + GSeq: from.GSeq, + } +} + +func GroupSpecFromV1Beta3(from v1beta3.GroupSpec) v1beta4.GroupSpec { + return 
v1beta4.GroupSpec{ + Name: from.Name, + Requirements: amigrate.PlacementRequirementsFromV1Beta3(from.Requirements), + Resources: ResourcesUnitsFromV1Beta3(from.Resources), + } +} + +func GroupFromV1Beta3(from v1beta3.Group) v1beta4.Group { + return v1beta4.Group{ + GroupID: GroupIDFromV1Beta3(from.GroupID), + State: v1beta4.Group_State(from.State), + GroupSpec: GroupSpecFromV1Beta3(from.GroupSpec), + CreatedAt: from.CreatedAt, + } +} diff --git a/go/node/deployment/v1beta4/msgs.go b/go/node/deployment/v1beta4/msgs.go new file mode 100644 index 00000000..0aceb9c8 --- /dev/null +++ b/go/node/deployment/v1beta4/msgs.go @@ -0,0 +1,333 @@ +package v1beta4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + MsgTypeCreateDeployment = "create-deployment" + MsgTypeDepositDeployment = "deposit-deployment" + MsgTypeUpdateDeployment = "update-deployment" + MsgTypeCloseDeployment = "close-deployment" + MsgTypeCloseGroup = "close-group" + MsgTypePauseGroup = "pause-group" + MsgTypeStartGroup = "start-group" +) + +var ( + _, _, _, _ sdk.Msg = &MsgCreateDeployment{}, &MsgUpdateDeployment{}, &MsgCloseDeployment{}, &MsgCloseGroup{} +) + +// NewMsgCreateDeployment creates a new MsgCreateDeployment instance +func NewMsgCreateDeployment(id DeploymentID, groups []GroupSpec, version []byte, + deposit sdk.Coin, depositor sdk.AccAddress) *MsgCreateDeployment { + return &MsgCreateDeployment{ + ID: id, + Groups: groups, + Version: version, + Deposit: deposit, + Depositor: depositor.String(), + } +} + +// Route implements the sdk.Msg interface +func (msg MsgCreateDeployment) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgCreateDeployment) Type() string { return MsgTypeCreateDeployment } + +// GetSignBytes encodes the message for signing +func (msg MsgCreateDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners 
defines whose signature is required +func (msg MsgCreateDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// ValidateBasic does basic validation like check owner and groups length +func (msg MsgCreateDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + if err := msg.Deposit.Validate(); err != nil { + return err + } + if len(msg.Groups) == 0 { + return ErrInvalidGroups + } + + if len(msg.Version) == 0 { + return ErrEmptyVersion + } + + if len(msg.Version) != ManifestVersionLength { + return ErrInvalidVersion + } + + for _, gs := range msg.Groups { + err := gs.ValidateBasic() + if err != nil { + return err + } + + // deposit must be same denom as price + if !msg.Deposit.IsZero() { + if gdenom := gs.Price().Denom; gdenom != msg.Deposit.Denom { + return sdkerrors.Wrapf(ErrInvalidDeposit, "Mismatched denominations (%v != %v)", msg.Deposit.Denom, gdenom) + } + } + } + + _, err := sdk.AccAddressFromBech32(msg.Depositor) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreateDeployment: Invalid Depositor Address") + } + + return nil +} + +// NewMsgDepositDeployment creates a new MsgDepositDeployment instance +func NewMsgDepositDeployment(id DeploymentID, amount sdk.Coin, depositor string) *MsgDepositDeployment { + return &MsgDepositDeployment{ + ID: id, + Amount: amount, + Depositor: depositor, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgDepositDeployment) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgDepositDeployment) Type() string { return MsgTypeDepositDeployment } + +// GetSignBytes encodes the message for signing +func (msg MsgDepositDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg 
MsgDepositDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// ValidateBasic does basic validation like check owner and groups length +func (msg MsgDepositDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + + if msg.Amount.IsZero() { + return ErrInvalidDeposit + } + + _, err := sdk.AccAddressFromBech32(msg.Depositor) + if err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDepositDeployment: Invalid Depositor Address") + } + + return nil +} + +// NewMsgUpdateDeployment creates a new MsgUpdateDeployment instance +func NewMsgUpdateDeployment(id DeploymentID, version []byte) *MsgUpdateDeployment { + return &MsgUpdateDeployment{ + ID: id, + Version: version, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgUpdateDeployment) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgUpdateDeployment) Type() string { return MsgTypeUpdateDeployment } + +// ValidateBasic does basic validation +func (msg MsgUpdateDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + + if len(msg.Version) == 0 { + return ErrEmptyVersion + } + + if len(msg.Version) != ManifestVersionLength { + return ErrInvalidVersion + } + + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgUpdateDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgUpdateDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgCloseDeployment creates a new MsgCloseDeployment instance +func NewMsgCloseDeployment(id DeploymentID) *MsgCloseDeployment { + return &MsgCloseDeployment{ + 
ID: id, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgCloseDeployment) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgCloseDeployment) Type() string { return MsgTypeCloseDeployment } + +// ValidateBasic does basic validation with deployment details +func (msg MsgCloseDeployment) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgCloseDeployment) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgCloseDeployment) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgCloseGroup creates a new MsgCloseGroup instance +func NewMsgCloseGroup(id GroupID) *MsgCloseGroup { + return &MsgCloseGroup{ + ID: id, + } +} + +// Route implements the sdk.Msg interface for routing +func (msg MsgCloseGroup) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface exposing message type +func (msg MsgCloseGroup) Type() string { return MsgTypeCloseGroup } + +// ValidateBasic calls underlying GroupID.Validate() check and returns result +func (msg MsgCloseGroup) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgCloseGroup) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgCloseGroup) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgPauseGroup creates a new MsgPauseGroup instance +func NewMsgPauseGroup(id GroupID) *MsgPauseGroup { + return 
&MsgPauseGroup{ + ID: id, + } +} + +// Route implements the sdk.Msg interface for routing +func (msg MsgPauseGroup) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface exposing message type +func (msg MsgPauseGroup) Type() string { return MsgTypePauseGroup } + +// ValidateBasic calls underlying GroupID.Validate() check and returns result +func (msg MsgPauseGroup) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgPauseGroup) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgPauseGroup) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgStartGroup creates a new MsgStartGroup instance +func NewMsgStartGroup(id GroupID) *MsgStartGroup { + return &MsgStartGroup{ + ID: id, + } +} + +// Route implements the sdk.Msg interface for routing +func (msg MsgStartGroup) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface exposing message type +func (msg MsgStartGroup) Type() string { return MsgTypeStartGroup } + +// ValidateBasic calls underlying GroupID.Validate() check and returns result +func (msg MsgStartGroup) ValidateBasic() error { + if err := msg.ID.Validate(); err != nil { + return err + } + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgStartGroup) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgStartGroup) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.ID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} diff --git a/go/node/deployment/v1beta4/msgs_test.go 
b/go/node/deployment/v1beta4/msgs_test.go new file mode 100644 index 00000000..6d4099a2 --- /dev/null +++ b/go/node/deployment/v1beta4/msgs_test.go @@ -0,0 +1,83 @@ +package v1beta4_test + +import ( + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/stretchr/testify/require" + + testutil "github.com/akash-network/akash-api/go/node/client/testutil/v1beta3" + types "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + sdktestutil "github.com/akash-network/akash-api/go/testutil" +) + +type testMsg struct { + msg sdk.Msg + err error +} + +func TestVersionValidation(t *testing.T) { + tests := []testMsg{ + { + msg: &types.MsgCreateDeployment{ + ID: testutil.DeploymentID(t), + Version: testutil.DeploymentVersion(t), + Groups: []types.GroupSpec{ + testutil.GroupSpec(t), + }, + Depositor: testutil.AccAddress(t).String(), + Deposit: sdktestutil.AkashCoin(t, 0), + }, + err: nil, + }, + { + msg: &types.MsgCreateDeployment{ + ID: testutil.DeploymentID(t), + Version: []byte(""), + Groups: []types.GroupSpec{ + testutil.GroupSpec(t), + }, + Depositor: testutil.AccAddress(t).String(), + Deposit: sdktestutil.AkashCoin(t, 0), + }, + err: types.ErrEmptyVersion, + }, + { + msg: &types.MsgCreateDeployment{ + ID: testutil.DeploymentID(t), + Version: []byte("invalidversion"), + Groups: []types.GroupSpec{ + testutil.GroupSpec(t), + }, + Depositor: testutil.AccAddress(t).String(), + Deposit: sdktestutil.AkashCoin(t, 0), + }, + err: types.ErrInvalidVersion, + }, + { + msg: &types.MsgUpdateDeployment{ + ID: testutil.DeploymentID(t), + Version: testutil.DeploymentVersion(t), + }, + err: nil, + }, + { + msg: &types.MsgUpdateDeployment{ + ID: testutil.DeploymentID(t), + Version: []byte(""), + }, + err: types.ErrEmptyVersion, + }, + { + msg: &types.MsgUpdateDeployment{ + ID: testutil.DeploymentID(t), + Version: []byte("invalidversion"), + }, + err: types.ErrInvalidVersion, + }, + } + + for _, test := range tests { + require.Equal(t, test.err, 
test.msg.ValidateBasic()) + } +} diff --git a/go/node/deployment/v1beta4/params.go b/go/node/deployment/v1beta4/params.go new file mode 100644 index 00000000..cf485eed --- /dev/null +++ b/go/node/deployment/v1beta4/params.go @@ -0,0 +1,91 @@ +package v1beta4 + +import ( + "fmt" + "math" + + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +const ( + keyMinDeposits = "MinDeposits" +) + +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair([]byte(keyMinDeposits), &p.MinDeposits, validateMinDeposits), + } +} + +func DefaultParams() Params { + return Params{ + MinDeposits: sdk.Coins{ + sdk.NewCoin("uakt", sdk.NewInt(500000)), + }, + } +} + +func (p Params) Validate() error { + if err := validateMinDeposits(p.MinDeposits); err != nil { + return err + } + return nil +} + +func (p Params) ValidateDeposit(amt sdk.Coin) error { + min, err := p.MinDepositFor(amt.Denom) + + if err != nil { + return err + } + + if amt.IsGTE(min) { + return nil + } + + return fmt.Errorf("%w: Deposit too low - %v < %v", ErrInvalidDeposit, amt.Amount, min) +} + +func (p Params) MinDepositFor(denom string) (sdk.Coin, error) { + for _, minDeposit := range p.MinDeposits { + if minDeposit.Denom == denom { + return sdk.NewCoin(minDeposit.Denom, minDeposit.Amount), nil + } + } + + return sdk.NewInt64Coin(denom, math.MaxInt64), fmt.Errorf("%w: Invalid deposit denomination %v", ErrInvalidDeposit, denom) +} + +func validateMinDeposits(i interface{}) error { + vals, ok := i.(sdk.Coins) + if !ok { + return fmt.Errorf("%w: Min Deposits - invalid type: %T", ErrInvalidParam, i) + } + + check := make(map[string]bool) + + for _, minDeposit := range vals { + if _, exists := check[minDeposit.Denom]; exists { + return 
fmt.Errorf("duplicate Min Deposit for denom (%#v)", minDeposit) + } + + check[minDeposit.Denom] = true + + if minDeposit.Amount.Uint64() >= math.MaxInt32 { + return fmt.Errorf("%w: Min Deposit (%v) - too large: %v", ErrInvalidParam, minDeposit.Denom, minDeposit.Amount.Uint64()) + } + } + + if _, exists := check["uakt"]; !exists { + return fmt.Errorf("%w: Min Deposits - uakt not given: %#v", ErrInvalidParam, vals) + } + + return nil +} diff --git a/go/node/deployment/v1beta4/params.pb.go b/go/node/deployment/v1beta4/params.pb.go new file mode 100644 index 00000000..54dc604b --- /dev/null +++ b/go/node/deployment/v1beta4/params.pb.go @@ -0,0 +1,340 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/params.proto + +package v1beta4 + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params defines the parameters for the x/deployment package +type Params struct { + MinDeposits github_com_cosmos_cosmos_sdk_types.Coins `protobuf:"bytes,1,rep,name=min_deposits,json=minDeposits,proto3,castrepeated=github.com/cosmos/cosmos-sdk/types.Coins" json:"min_deposits" yaml:"min_deposits"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_31b8da9fdb2b2cf0, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetMinDeposits() github_com_cosmos_cosmos_sdk_types.Coins { + if m != nil { + return m.MinDeposits + } + return nil +} + +func init() { + proto.RegisterType((*Params)(nil), "akash.deployment.v1beta4.Params") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/params.proto", fileDescriptor_31b8da9fdb2b2cf0) +} + +var fileDescriptor_31b8da9fdb2b2cf0 = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x49, 0x2d, 0xc8, 0xc9, 0xaf, 0xcc, 0x4d, 0xcd, 0x2b, 0xd1, 0x2f, 0x33, 0x4c, + 0x4a, 0x2d, 0x49, 0x34, 0xd1, 0x2f, 0x48, 0x2c, 0x4a, 0xcc, 
0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x92, 0x00, 0x2b, 0xd3, 0x43, 0x28, 0xd3, 0x83, 0x2a, 0x93, 0x12, 0x49, 0xcf, 0x4f, + 0xcf, 0x07, 0x2b, 0xd2, 0x07, 0xb1, 0x20, 0xea, 0xa5, 0xe4, 0x92, 0xf3, 0x8b, 0x73, 0xf3, 0x8b, + 0xf5, 0x93, 0x12, 0x8b, 0x53, 0xa1, 0x26, 0x1a, 0xea, 0x27, 0xe7, 0x67, 0xe6, 0x41, 0xe4, 0x95, + 0xd6, 0x33, 0x72, 0xb1, 0x05, 0x80, 0x2d, 0x10, 0x5a, 0xc2, 0xc8, 0xc5, 0x93, 0x9b, 0x99, 0x17, + 0x9f, 0x92, 0x5a, 0x90, 0x5f, 0x9c, 0x59, 0x52, 0x2c, 0xc1, 0xa8, 0xc0, 0xac, 0xc1, 0x6d, 0x24, + 0xa9, 0x07, 0x31, 0x42, 0x0f, 0x64, 0x04, 0xd4, 0x36, 0x43, 0x3d, 0xe7, 0xfc, 0xcc, 0x3c, 0xa7, + 0xb4, 0x13, 0xf7, 0xe4, 0x19, 0x1e, 0xdd, 0x93, 0xe7, 0xf6, 0xcd, 0xcc, 0x73, 0x81, 0xea, 0x7a, + 0x75, 0x4f, 0x1e, 0xc5, 0x94, 0x4f, 0xf7, 0xe4, 0x85, 0x2b, 0x13, 0x73, 0x73, 0xac, 0x94, 0x90, + 0x45, 0x95, 0x56, 0xdd, 0x97, 0xd7, 0x48, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, + 0xd5, 0x87, 0xba, 0x12, 0x42, 0xe9, 0x16, 0xa7, 0x64, 0xeb, 0x97, 0x54, 0x16, 0xa4, 0x16, 0x83, + 0xad, 0x29, 0x0e, 0xe2, 0xce, 0x45, 0x98, 0xef, 0x14, 0x7e, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, + 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x70, 0xe1, 0xb1, 0x1c, 0xc3, 0x8d, + 0xc7, 0x72, 0x0c, 0x51, 0xb6, 0x48, 0x06, 0x82, 0x83, 0x49, 0x37, 0x2f, 0xb5, 0xa4, 0x3c, 0xbf, + 0x28, 0x1b, 0xca, 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, 0x4f, 0x49, 0xc5, 0x12, + 0xce, 0x49, 0x6c, 0xe0, 0x10, 0x31, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x2f, 0x95, 0x27, 0x43, + 0x8a, 0x01, 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.MinDeposits) > 0 { + for iNdEx := len(m.MinDeposits) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.MinDeposits[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.MinDeposits) > 0 { + for _, e := range m.MinDeposits { + l = e.Size() + n += 1 + l + sovParams(uint64(l)) + } + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field MinDeposits", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if 
msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.MinDeposits = append(m.MinDeposits, types.Coin{}) + if err := m.MinDeposits[len(m.MinDeposits)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if 
depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/query.pb.go b/go/node/deployment/v1beta4/query.pb.go new file mode 100644 index 00000000..597b8bca --- /dev/null +++ b/go/node/deployment/v1beta4/query.pb.go @@ -0,0 +1,1628 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/query.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + v1beta3 "github.com/akash-network/akash-api/go/node/escrow/v1beta3" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryDeploymentsRequest is request type for the Query/Deployments RPC method +type QueryDeploymentsRequest struct { + Filters DeploymentFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDeploymentsRequest) Reset() { *m = QueryDeploymentsRequest{} } +func (m *QueryDeploymentsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryDeploymentsRequest) ProtoMessage() {} +func (*QueryDeploymentsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{0} +} +func (m *QueryDeploymentsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDeploymentsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentsRequest.Merge(m, src) +} +func (m *QueryDeploymentsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentsRequest proto.InternalMessageInfo + +func (m *QueryDeploymentsRequest) GetFilters() DeploymentFilters { + if m != nil { + return m.Filters + } + return DeploymentFilters{} +} + +func (m *QueryDeploymentsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryDeploymentsResponse is response type for the Query/Deployments RPC method +type QueryDeploymentsResponse struct { + Deployments DeploymentResponses 
`protobuf:"bytes,1,rep,name=deployments,proto3,castrepeated=DeploymentResponses" json:"deployments"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryDeploymentsResponse) Reset() { *m = QueryDeploymentsResponse{} } +func (m *QueryDeploymentsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDeploymentsResponse) ProtoMessage() {} +func (*QueryDeploymentsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{1} +} +func (m *QueryDeploymentsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDeploymentsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentsResponse.Merge(m, src) +} +func (m *QueryDeploymentsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentsResponse proto.InternalMessageInfo + +func (m *QueryDeploymentsResponse) GetDeployments() DeploymentResponses { + if m != nil { + return m.Deployments + } + return nil +} + +func (m *QueryDeploymentsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryDeploymentRequest is request type for the Query/Deployment RPC method +type QueryDeploymentRequest struct { + ID DeploymentID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryDeploymentRequest) Reset() { *m = QueryDeploymentRequest{} } +func (m *QueryDeploymentRequest) String() string { return 
proto.CompactTextString(m) } +func (*QueryDeploymentRequest) ProtoMessage() {} +func (*QueryDeploymentRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{2} +} +func (m *QueryDeploymentRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryDeploymentRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentRequest.Merge(m, src) +} +func (m *QueryDeploymentRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentRequest proto.InternalMessageInfo + +func (m *QueryDeploymentRequest) GetID() DeploymentID { + if m != nil { + return m.ID + } + return DeploymentID{} +} + +// QueryDeploymentResponse is response type for the Query/Deployment RPC method +type QueryDeploymentResponse struct { + Deployment Deployment `protobuf:"bytes,1,opt,name=deployment,proto3" json:"deployment" yaml:"deployment"` + Groups []Group `protobuf:"bytes,2,rep,name=groups,proto3" json:"groups" yaml:"groups"` + EscrowAccount v1beta3.Account `protobuf:"bytes,3,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` +} + +func (m *QueryDeploymentResponse) Reset() { *m = QueryDeploymentResponse{} } +func (m *QueryDeploymentResponse) String() string { return proto.CompactTextString(m) } +func (*QueryDeploymentResponse) ProtoMessage() {} +func (*QueryDeploymentResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{3} +} +func (m *QueryDeploymentResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} 
+func (m *QueryDeploymentResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryDeploymentResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryDeploymentResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryDeploymentResponse.Merge(m, src) +} +func (m *QueryDeploymentResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryDeploymentResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryDeploymentResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryDeploymentResponse proto.InternalMessageInfo + +func (m *QueryDeploymentResponse) GetDeployment() Deployment { + if m != nil { + return m.Deployment + } + return Deployment{} +} + +func (m *QueryDeploymentResponse) GetGroups() []Group { + if m != nil { + return m.Groups + } + return nil +} + +func (m *QueryDeploymentResponse) GetEscrowAccount() v1beta3.Account { + if m != nil { + return m.EscrowAccount + } + return v1beta3.Account{} +} + +// QueryGroupRequest is request type for the Query/Group RPC method +type QueryGroupRequest struct { + ID GroupID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryGroupRequest) Reset() { *m = QueryGroupRequest{} } +func (m *QueryGroupRequest) String() string { return proto.CompactTextString(m) } +func (*QueryGroupRequest) ProtoMessage() {} +func (*QueryGroupRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{4} +} +func (m *QueryGroupRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGroupRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGroupRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} 
+func (m *QueryGroupRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGroupRequest.Merge(m, src) +} +func (m *QueryGroupRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryGroupRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGroupRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGroupRequest proto.InternalMessageInfo + +func (m *QueryGroupRequest) GetID() GroupID { + if m != nil { + return m.ID + } + return GroupID{} +} + +// QueryGroupResponse is response type for the Query/Group RPC method +type QueryGroupResponse struct { + Group Group `protobuf:"bytes,1,opt,name=group,proto3" json:"group"` +} + +func (m *QueryGroupResponse) Reset() { *m = QueryGroupResponse{} } +func (m *QueryGroupResponse) String() string { return proto.CompactTextString(m) } +func (*QueryGroupResponse) ProtoMessage() {} +func (*QueryGroupResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_17bc7a7d7b435f28, []int{5} +} +func (m *QueryGroupResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryGroupResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryGroupResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryGroupResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryGroupResponse.Merge(m, src) +} +func (m *QueryGroupResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryGroupResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryGroupResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryGroupResponse proto.InternalMessageInfo + +func (m *QueryGroupResponse) GetGroup() Group { + if m != nil { + return m.Group + } + return Group{} +} + +func init() { + proto.RegisterType((*QueryDeploymentsRequest)(nil), "akash.deployment.v1beta4.QueryDeploymentsRequest") + 
proto.RegisterType((*QueryDeploymentsResponse)(nil), "akash.deployment.v1beta4.QueryDeploymentsResponse") + proto.RegisterType((*QueryDeploymentRequest)(nil), "akash.deployment.v1beta4.QueryDeploymentRequest") + proto.RegisterType((*QueryDeploymentResponse)(nil), "akash.deployment.v1beta4.QueryDeploymentResponse") + proto.RegisterType((*QueryGroupRequest)(nil), "akash.deployment.v1beta4.QueryGroupRequest") + proto.RegisterType((*QueryGroupResponse)(nil), "akash.deployment.v1beta4.QueryGroupResponse") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/query.proto", fileDescriptor_17bc7a7d7b435f28) +} + +var fileDescriptor_17bc7a7d7b435f28 = []byte{ + // 684 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x55, 0xcd, 0x6e, 0xd3, 0x4c, + 0x14, 0x8d, 0xdd, 0x9f, 0x4f, 0x9a, 0xa8, 0x9f, 0xd4, 0x01, 0x81, 0x15, 0xc0, 0x2e, 0x56, 0x49, + 0x4a, 0x7f, 0x3c, 0x24, 0x61, 0x55, 0xd4, 0x05, 0x56, 0xd4, 0xaa, 0xb0, 0xa1, 0xde, 0x80, 0x10, + 0x12, 0x72, 0x92, 0xa9, 0x6b, 0x35, 0xf1, 0xb8, 0x9e, 0x09, 0x55, 0xb6, 0x3c, 0x01, 0x88, 0x17, + 0x60, 0x83, 0x84, 0x58, 0xb0, 0xe2, 0x21, 0xba, 0xac, 0x84, 0x90, 0x58, 0x05, 0x94, 0xb0, 0x40, + 0x2c, 0x58, 0xf4, 0x09, 0x90, 0x67, 0x26, 0xb5, 0x21, 0x49, 0x93, 0xec, 0x12, 0xcf, 0xb9, 0xe7, + 0x9c, 0x7b, 0xcf, 0x1d, 0x1b, 0x2c, 0xbb, 0x87, 0x2e, 0x3d, 0x40, 0x75, 0x1c, 0x36, 0x48, 0xbb, + 0x89, 0x03, 0x86, 0x5e, 0x14, 0xab, 0x98, 0xb9, 0x77, 0xd1, 0x51, 0x0b, 0x47, 0x6d, 0x2b, 0x8c, + 0x08, 0x23, 0x50, 0xe3, 0x28, 0x2b, 0x41, 0x59, 0x12, 0x95, 0xbb, 0xec, 0x11, 0x8f, 0x70, 0x10, + 0x8a, 0x7f, 0x09, 0x7c, 0xee, 0xba, 0x47, 0x88, 0xd7, 0xc0, 0xc8, 0x0d, 0x7d, 0xe4, 0x06, 0x01, + 0x61, 0x2e, 0xf3, 0x49, 0x40, 0xe5, 0xe9, 0x6a, 0x8d, 0xd0, 0x26, 0xa1, 0xa8, 0xea, 0x52, 0x2c, + 0x64, 0xa4, 0x68, 0x11, 0x85, 0xae, 0xe7, 0x07, 0x1c, 0x2c, 0xb1, 0xb7, 0x47, 0xfa, 0x4b, 0x99, + 0x11, 0xd0, 0xd1, 0xad, 0x78, 0x11, 0x69, 0x85, 0x12, 0x95, 0xbf, 0x18, 0xe5, 0xd7, 0x25, 0x6e, + 
0x49, 0xe0, 0x30, 0xad, 0x45, 0xe4, 0x58, 0x62, 0xca, 0x88, 0xb5, 0x43, 0x2c, 0xdb, 0x30, 0x3f, + 0x2a, 0xe0, 0xea, 0x5e, 0xec, 0xbe, 0x72, 0xce, 0x45, 0x1d, 0x7c, 0xd4, 0xc2, 0x94, 0xc1, 0x87, + 0xe0, 0xbf, 0x7d, 0xbf, 0xc1, 0x70, 0x44, 0x35, 0x65, 0x49, 0x59, 0xc9, 0x96, 0xd6, 0xac, 0x51, + 0x23, 0xb4, 0x92, 0xf2, 0x6d, 0x51, 0x62, 0xcf, 0x9e, 0x74, 0x8c, 0x8c, 0xd3, 0x67, 0x80, 0xdb, + 0x00, 0x24, 0x73, 0xd1, 0x54, 0xce, 0x97, 0xb7, 0xc4, 0x10, 0xad, 0x78, 0x88, 0x96, 0xc8, 0x4a, + 0x0e, 0xd1, 0x7a, 0xe4, 0x7a, 0x58, 0x1a, 0x71, 0x52, 0x95, 0xe6, 0x17, 0x05, 0x68, 0x83, 0x86, + 0x69, 0x48, 0x02, 0x8a, 0x61, 0x08, 0xb2, 0x89, 0xb7, 0xd8, 0xf5, 0xcc, 0x4a, 0xb6, 0x54, 0x1c, + 0xed, 0xfa, 0x1f, 0xa2, 0x3e, 0x8f, 0x7d, 0x2d, 0xf6, 0xfe, 0xe1, 0x9b, 0x71, 0x69, 0xf0, 0x8c, + 0x3a, 0x69, 0x09, 0xb8, 0x33, 0xa4, 0xad, 0xc2, 0xd8, 0xb6, 0x04, 0xd5, 0x5f, 0x7d, 0x3d, 0x03, + 0x57, 0x06, 0xdc, 0x88, 0x18, 0x6c, 0xa0, 0xfa, 0x75, 0x99, 0x40, 0x7e, 0x92, 0x04, 0x76, 0x2b, + 0x36, 0x88, 0x1b, 0xe8, 0x76, 0x0c, 0x75, 0xb7, 0xe2, 0xa8, 0x7e, 0xdd, 0xfc, 0xa4, 0x0e, 0xc4, + 0x7c, 0x3e, 0xb4, 0x26, 0x00, 0x09, 0x9d, 0xd4, 0x59, 0x9e, 0x44, 0xc7, 0x2e, 0xc4, 0x2a, 0xbf, + 0x3a, 0x46, 0xaa, 0xfe, 0xac, 0x63, 0x2c, 0xb6, 0xdd, 0x66, 0x63, 0xd3, 0x4c, 0x9e, 0x99, 0x4e, + 0x0a, 0x00, 0x9f, 0x80, 0x79, 0xbe, 0xa4, 0x54, 0x53, 0x79, 0x3c, 0xc6, 0x68, 0xa9, 0x9d, 0x18, + 0x67, 0x1b, 0x52, 0x45, 0x96, 0x9d, 0x75, 0x8c, 0x05, 0xa1, 0x20, 0xfe, 0x9b, 0x8e, 0x3c, 0x80, + 0x0f, 0xc0, 0xff, 0x62, 0xd3, 0x9f, 0xbb, 0xb5, 0x1a, 0x69, 0x05, 0x4c, 0x9b, 0xe1, 0xcd, 0xdc, + 0x90, 0x0a, 0xe2, 0x50, 0xb2, 0x97, 0xad, 0xfb, 0x02, 0x24, 0x17, 0x75, 0x41, 0x9c, 0xca, 0x87, + 0x9b, 0xb3, 0x3f, 0xdf, 0x1a, 0x19, 0xd3, 0x01, 0x8b, 0x7c, 0x6a, 0xdc, 0x48, 0x3f, 0x8f, 0xad, + 0x54, 0x1e, 0x37, 0xc7, 0x98, 0x1f, 0x12, 0xc5, 0x1e, 0x80, 0x69, 0x4e, 0x19, 0xc2, 0x3d, 0x30, + 0xc7, 0xbb, 0x90, 0xbc, 0x63, 0x87, 0x22, 0x4c, 0x8b, 0x9a, 0xd2, 0xef, 0x19, 0x30, 0xc7, 0x39, + 0xe1, 0x7b, 0x05, 0x64, 0x53, 0x17, 
0x03, 0x4e, 0xbe, 0xfb, 0xfd, 0x5b, 0x9f, 0x2b, 0x4d, 0x53, + 0x22, 0xdc, 0x9b, 0xa5, 0x97, 0x9f, 0x7f, 0xbc, 0x51, 0xd7, 0xe1, 0x2a, 0x9a, 0xe0, 0x4d, 0x47, + 0x51, 0xc3, 0xa7, 0x0c, 0xbe, 0x53, 0x00, 0x48, 0xb8, 0xe0, 0x9d, 0x29, 0x6e, 0xa9, 0x30, 0x3a, + 0xfd, 0xbd, 0x9e, 0xd6, 0xa7, 0x1f, 0xec, 0x13, 0xf8, 0x5a, 0x01, 0x73, 0x7c, 0xe6, 0x70, 0x6d, + 0x8c, 0x60, 0x7a, 0x4b, 0x72, 0xeb, 0x93, 0x81, 0xa5, 0xb1, 0x0d, 0x6e, 0xac, 0x00, 0x6f, 0xa1, + 0x8b, 0xdf, 0xec, 0xc2, 0x93, 0xfd, 0xf8, 0xa4, 0xab, 0x2b, 0xa7, 0x5d, 0x5d, 0xf9, 0xde, 0xd5, + 0x95, 0x57, 0x3d, 0x3d, 0x73, 0xda, 0xd3, 0x33, 0x5f, 0x7b, 0x7a, 0xe6, 0xe9, 0x96, 0xe7, 0xb3, + 0x83, 0x56, 0xd5, 0xaa, 0x91, 0xa6, 0xa0, 0xda, 0x08, 0x30, 0x3b, 0x26, 0xd1, 0xa1, 0xfc, 0x17, + 0x7f, 0xcc, 0x3c, 0x82, 0x02, 0x52, 0xc7, 0x43, 0x44, 0xaa, 0xf3, 0xfc, 0xab, 0x50, 0xfe, 0x13, + 0x00, 0x00, 0xff, 0xff, 0x08, 0x86, 0xe1, 0x65, 0x52, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type QueryClient interface { + // Deployments queries deployments + Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) + // Deployment queries deployment details + Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) + // Group queries group details + Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Deployments(ctx context.Context, in *QueryDeploymentsRequest, opts ...grpc.CallOption) (*QueryDeploymentsResponse, error) { + out := new(QueryDeploymentsResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Query/Deployments", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Deployment(ctx context.Context, in *QueryDeploymentRequest, opts ...grpc.CallOption) (*QueryDeploymentResponse, error) { + out := new(QueryDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Query/Deployment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Group(ctx context.Context, in *QueryGroupRequest, opts ...grpc.CallOption) (*QueryGroupResponse, error) { + out := new(QueryGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Query/Group", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. 
+type QueryServer interface { + // Deployments queries deployments + Deployments(context.Context, *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) + // Deployment queries deployment details + Deployment(context.Context, *QueryDeploymentRequest) (*QueryDeploymentResponse, error) + // Group queries group details + Group(context.Context, *QueryGroupRequest) (*QueryGroupResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Deployments(ctx context.Context, req *QueryDeploymentsRequest) (*QueryDeploymentsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Deployments not implemented") +} +func (*UnimplementedQueryServer) Deployment(ctx context.Context, req *QueryDeploymentRequest) (*QueryDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Deployment not implemented") +} +func (*UnimplementedQueryServer) Group(ctx context.Context, req *QueryGroupRequest) (*QueryGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Group not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Deployments_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDeploymentsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Deployments(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Query/Deployments", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Deployments(ctx, req.(*QueryDeploymentsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Deployment_Handler(srv 
interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryDeploymentRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Deployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Query/Deployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Deployment(ctx, req.(*QueryDeploymentRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Group_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryGroupRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Group(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Query/Group", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Group(ctx, req.(*QueryGroupRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.deployment.v1beta4.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Deployments", + Handler: _Query_Deployments_Handler, + }, + { + MethodName: "Deployment", + Handler: _Query_Deployment_Handler, + }, + { + MethodName: "Group", + Handler: _Query_Group_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/deployment/v1beta4/query.proto", +} + +func (m *QueryDeploymentsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentsRequest) MarshalTo(dAtA []byte) 
(int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryDeploymentsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Deployments) > 0 { + for iNdEx := len(m.Deployments) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Deployments[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryDeploymentRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryDeploymentResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryDeploymentResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryDeploymentResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Groups) > 0 { + for iNdEx := len(m.Groups) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Groups[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Deployment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryGroupRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGroupRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m 
*QueryGroupRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryGroupResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryGroupResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryGroupResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Group.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryDeploymentsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDeploymentsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Deployments) > 0 { + for _, e := range m.Deployments { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryDeploymentRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + 
sovQuery(uint64(l)) + return n +} + +func (m *QueryDeploymentResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Deployment.Size() + n += 1 + l + sovQuery(uint64(l)) + if len(m.Groups) > 0 { + for _, e := range m.Groups { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + l = m.EscrowAccount.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryGroupRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryGroupResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Group.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryDeploymentsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDeploymentsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 
{ + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDeploymentsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
QueryDeploymentsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployments", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Deployments = append(m.Deployments, QueryDeploymentResponse{}) + if err := m.Deployments[len(m.Deployments)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDeploymentRequest) Unmarshal(dAtA []byte) 
error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDeploymentRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryDeploymentResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift 
+ if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryDeploymentResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryDeploymentResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deployment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deployment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Groups", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Groups = append(m.Groups, Group{}) + if err := m.Groups[len(m.Groups)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGroupRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGroupRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGroupRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryGroupResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryGroupResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryGroupResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Group", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Group.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + 
} + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/query.pb.gw.go b/go/node/deployment/v1beta4/query.pb.gw.go new file mode 100644 index 00000000..0a194700 --- /dev/null +++ b/go/node/deployment/v1beta4/query.pb.gw.go @@ -0,0 +1,337 @@ +// Code generated by 
protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/deployment/v1beta4/query.proto + +/* +Package v1beta4 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. +*/ +package v1beta4 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Deployments_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Deployments(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Deployments_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != 
nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployments_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Deployments(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Deployment_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployment_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Deployment(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Deployment_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryDeploymentRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Deployment_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Deployment(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Group_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: 
[]int(nil)} +) + +func request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGroupRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Group(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Group_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryGroupRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Group_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Group(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Deployments_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Deployment_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Group_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Deployments_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Deployments_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployments_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Deployment_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Deployment_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Deployment_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Group_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Group_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Group_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + return nil +} + +var ( + pattern_Query_Deployments_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta4", "deployments", "list"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Deployment_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta4", "deployments", "info"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Group_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "deployment", "v1beta4", "groups", "info"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Deployments_0 = runtime.ForwardResponseMessage + + forward_Query_Deployment_0 = runtime.ForwardResponseMessage + + forward_Query_Group_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/deployment/v1beta4/resource_list_validation.go b/go/node/deployment/v1beta4/resource_list_validation.go new file mode 100644 index 00000000..89e965df --- /dev/null +++ b/go/node/deployment/v1beta4/resource_list_validation.go @@ -0,0 +1,187 @@ +package v1beta4 + +import ( + "errors" +) + +var ( + ErrNoGroupsPresent = errors.New("validation: no groups present") + ErrGroupEmptyName = errors.New("validation: group has empty name") +) + +// func ValidateResourceList(rlist GSpec) error { +// if rlist.GetName() == "" { +// return ErrGroupEmptyName +// } +// +// units := rlist.GetResources() +// +// if count := len(units); count > validationConfig.MaxGroupUnits { +// return fmt.Errorf("group %v: too many units (%v > %v)", rlist.GetName(), count, validationConfig.MaxGroupUnits) +// } +// +// if err := units.Validate(); err != nil { +// return fmt.Errorf("group %v: %w", rlist.GetName(), err) +// } +// +// limits := newLimits() +// +// for _, resource := range units { +// gLimits, err := validateGroupResource(resource) +// if err != nil { +// return fmt.Errorf("group %v: %w", rlist.GetName(), 
err) +// } +// +// limits.add(gLimits) +// } +// +// if limits.cpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupCPU)) || limits.cpu.LTE(sdk.ZeroInt()) { +// return fmt.Errorf("group %v: invalid total CPU (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupCPU, limits.cpu, 0) +// } +// +// if !limits.gpu.IsZero() && (limits.gpu.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupGPU)) || limits.gpu.LTE(sdk.ZeroInt())) { +// return fmt.Errorf("group %v: invalid total GPU (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupGPU, limits.gpu, 0) +// } +// +// if limits.memory.GT(sdk.NewIntFromUint64(validationConfig.MaxGroupMemory)) || limits.memory.LTE(sdk.ZeroInt()) { +// return fmt.Errorf("group %v: invalid total memory (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupMemory, limits.memory, 0) +// } +// +// for i := range limits.storage { +// if limits.storage[i].GT(sdk.NewIntFromUint64(validationConfig.MaxGroupStorage)) || limits.storage[i].LTE(sdk.ZeroInt()) { +// return fmt.Errorf("group %v: invalid total storage (%v > %v > %v fails)", +// rlist.GetName(), validationConfig.MaxGroupStorage, limits.storage, 0) +// } +// } +// +// return nil +// } + +// func validateGroupResource(rg GroupResource) (resourceLimits, error) { +// limits, err := validateResourceUnits(rg.Resource.Units) +// if err != nil { +// return resourceLimits{}, err +// } +// +// if rg.Count > uint32(validationConfig.MaxUnitCount) || rg.Count < uint32(validationConfig.MinUnitCount) { +// return resourceLimits{}, fmt.Errorf("error: invalid unit count (%v > %v > %v fails)", +// validationConfig.MaxUnitCount, rg.Count, validationConfig.MinUnitCount) +// } +// +// limits.mul(rg.Count) +// +// return limits, nil +// } + +// func validateResourceUnits(units types.ResourceUnits) (resourceLimits, error) { +// limits := newLimits() +// +// val, err := validateCPU(units.CPU) +// if err != nil { +// return resourceLimits{}, err +// } +// limits.cpu = 
limits.cpu.Add(val) +// +// val, err = validateGPU(units.GPU) +// if err != nil { +// return resourceLimits{}, err +// } +// limits.gpu = limits.gpu.Add(val) +// +// val, err = validateMemory(units.Memory) +// if err != nil { +// return resourceLimits{}, err +// } +// limits.memory = limits.memory.Add(val) +// +// var storage []sdk.Int +// storage, err = validateStorage(units.Storage) +// if err != nil { +// return resourceLimits{}, err +// } +// +// // fixme this is not actually sum for storage usecase. +// // do we really need sum here? +// limits.storage = storage +// +// return limits, nil +// } +// +// func validateCPU(u *types.CPU) (sdk.Int, error) { +// if u == nil { +// return sdk.Int{}, fmt.Errorf("error: invalid unit CPU, cannot be nil") +// } +// if (u.Units.Value() > uint64(validationConfig.MaxUnitCPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitCPU)) { +// return sdk.Int{}, fmt.Errorf("error: invalid unit CPU (%v > %v > %v fails)", +// validationConfig.MaxUnitCPU, u.Units.Value(), validationConfig.MinUnitCPU) +// } +// +// if err := u.Attributes.Validate(); err != nil { +// return sdk.Int{}, fmt.Errorf("error: invalid CPU attributes: %w", err) +// } +// +// return u.Units.Val, nil +// } +// +// func validateGPU(u *types.GPU) (sdk.Int, error) { +// if u == nil { +// return sdk.Int{}, fmt.Errorf("error: invalid unit GPU, cannot be nil") +// } +// +// if (u.Units.Value() > uint64(validationConfig.MaxUnitGPU)) || (u.Units.Value() < uint64(validationConfig.MinUnitGPU)) { +// return sdk.Int{}, fmt.Errorf("error: invalid unit GPU (%v > %v > %v fails)", +// validationConfig.MaxUnitGPU, u.Units.Value(), validationConfig.MinUnitGPU) +// } +// +// if u.Units.Value() == 0 && len(u.Attributes) > 0 { +// return sdk.Int{}, fmt.Errorf("error: invalid GPU state. 
attributes cannot be present if units == 0") +// } +// +// if err := u.Attributes.Validate(); err != nil { +// return sdk.Int{}, fmt.Errorf("error: invalid GPU attributes: %w", err) +// } +// +// return u.Units.Val, nil +// } +// +// func validateMemory(u *types.Memory) (sdk.Int, error) { +// if u == nil { +// return sdk.Int{}, fmt.Errorf("error: invalid unit memory, cannot be nil") +// } +// if (u.Quantity.Value() > validationConfig.MaxUnitMemory) || (u.Quantity.Value() < validationConfig.MinUnitMemory) { +// return sdk.Int{}, fmt.Errorf("error: invalid unit memory (%v > %v > %v fails)", +// validationConfig.MaxUnitMemory, u.Quantity.Value(), validationConfig.MinUnitMemory) +// } +// +// if err := u.Attributes.Validate(); err != nil { +// return sdk.Int{}, fmt.Errorf("error: invalid Memory attributes: %w", err) +// } +// +// return u.Quantity.Val, nil +// } +// +// func validateStorage(u types.Volumes) ([]sdk.Int, error) { +// if u == nil { +// return nil, fmt.Errorf("error: invalid unit storage, cannot be nil") +// } +// +// storage := make([]sdk.Int, 0, len(u)) +// +// for i := range u { +// if (u[i].Quantity.Value() > validationConfig.MaxUnitStorage) || (u[i].Quantity.Value() < validationConfig.MinUnitStorage) { +// return nil, fmt.Errorf("error: invalid unit storage (%v > %v > %v fails)", +// validationConfig.MaxUnitStorage, u[i].Quantity.Value(), validationConfig.MinUnitStorage) +// } +// +// if err := u[i].Attributes.Validate(); err != nil { +// return []sdk.Int{}, fmt.Errorf("error: invalid Storage attributes: %w", err) +// } +// +// storage = append(storage, u[i].Quantity.Val) +// } +// +// return storage, nil +// } diff --git a/go/node/deployment/v1beta4/resource_list_validation_test.go b/go/node/deployment/v1beta4/resource_list_validation_test.go new file mode 100644 index 00000000..b7e91a00 --- /dev/null +++ b/go/node/deployment/v1beta4/resource_list_validation_test.go @@ -0,0 +1,280 @@ +package v1beta4 + +// func TestValidateCPUNil(t *testing.T) { +// 
_, err := validateCPU(nil) +// require.Error(t, err) +// } +// +// func TestValidateGPUNil(t *testing.T) { +// _, err := validateGPU(nil) +// require.Error(t, err) +// } +// +// func TestValidateMemoryNil(t *testing.T) { +// _, err := validateMemory(nil) +// require.Error(t, err) +// } +// +// func TestValidateStorageNil(t *testing.T) { +// _, err := validateStorage(nil) +// require.Error(t, err) +// } +// +// func TestValidateCPULimits(t *testing.T) { +// _, err := validateCPU(&types.CPU{Units: types.NewResourceValue(uint64(validationConfig.MinUnitCPU - 1))}) +// require.Error(t, err) +// +// _, err = validateCPU(&types.CPU{Units: types.NewResourceValue(uint64(validationConfig.MaxUnitCPU + 1))}) +// require.Error(t, err) +// +// _, err = validateCPU(&types.CPU{Units: types.NewResourceValue(uint64(validationConfig.MinUnitCPU))}) +// require.NoError(t, err) +// +// _, err = validateCPU(&types.CPU{Units: types.NewResourceValue(uint64(validationConfig.MaxUnitCPU))}) +// require.NoError(t, err) +// } +// +// func TestValidateGPULimits(t *testing.T) { +// _, err := validateGPU(&types.GPU{Units: types.NewResourceValue(uint64(validationConfig.MinUnitGPU - 1))}) +// require.Error(t, err) +// +// _, err = validateGPU(&types.GPU{Units: types.NewResourceValue(uint64(validationConfig.MaxUnitGPU + 1))}) +// require.Error(t, err) +// +// _, err = validateGPU(&types.GPU{Units: types.NewResourceValue(uint64(validationConfig.MinUnitGPU))}) +// require.NoError(t, err) +// +// _, err = validateGPU(&types.GPU{Units: types.NewResourceValue(uint64(validationConfig.MaxUnitGPU))}) +// require.NoError(t, err) +// } +// +// func TestValidateMemoryLimits(t *testing.T) { +// _, err := validateMemory(&types.Memory{Quantity: types.NewResourceValue(validationConfig.MinUnitMemory - 1)}) +// require.Error(t, err) +// +// _, err = validateMemory(&types.Memory{Quantity: types.NewResourceValue(validationConfig.MaxUnitMemory + 1)}) +// require.Error(t, err) +// +// _, err = 
validateMemory(&types.Memory{Quantity: types.NewResourceValue(validationConfig.MinUnitMemory)}) +// require.NoError(t, err) +// +// _, err = validateMemory(&types.Memory{Quantity: types.NewResourceValue(validationConfig.MaxUnitMemory)}) +// require.NoError(t, err) +// } +// +// func TestValidateStorageLimits(t *testing.T) { +// _, err := validateStorage(types.Volumes{{Quantity: types.NewResourceValue(validationConfig.MinUnitStorage - 1)}}) +// require.Error(t, err) +// +// _, err = validateStorage(types.Volumes{{Quantity: types.NewResourceValue(validationConfig.MaxUnitStorage + 1)}}) +// require.Error(t, err) +// +// _, err = validateStorage(types.Volumes{{Quantity: types.NewResourceValue(validationConfig.MinUnitStorage)}}) +// require.NoError(t, err) +// +// _, err = validateStorage(types.Volumes{{Quantity: types.NewResourceValue(validationConfig.MaxUnitStorage)}}) +// require.NoError(t, err) +// } +// +// type resourceListTest struct { +// rlist types.ResourceGroup +// shouldPass bool +// expErr error +// expErrString string +// } +// +// func dummyResources(count int) GroupResources { +// return make(GroupResources, count) +// } +// +// func TestValidateResourceList(t *testing.T) { +// tests := []resourceListTest{ +// { +// rlist: GroupSpec{}, +// shouldPass: false, +// expErr: ErrGroupEmptyName, +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: dummyResources(validationConfig.MaxGroupUnits + 1), +// }, +// shouldPass: false, +// expErrString: "group test: too many units", +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{}, +// Count: 1, +// }, +// }, +// }, +// }, +// shouldPass: false, +// expErrString: "error: invalid unit CPU", +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{ +// CPU: &types.CPU{Units: 
types.NewResourceValue(1000)}, +// }, +// Count: 1, +// }, +// }, +// }, +// }, +// shouldPass: false, +// expErrString: "error: invalid unit GPU", +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{ +// CPU: &types.CPU{Units: types.NewResourceValue(1000)}, +// GPU: &types.GPU{Units: types.NewResourceValue(0)}, +// }, +// Count: 1, +// }, +// }, +// }, +// }, +// shouldPass: false, +// expErrString: "error: invalid unit memory", +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{ +// CPU: &types.CPU{Units: types.NewResourceValue(1000)}, +// GPU: &types.GPU{Units: types.NewResourceValue(0)}, +// Memory: &types.Memory{Quantity: types.NewResourceValue(validationConfig.MinUnitMemory)}, +// }, +// Count: 1, +// }, +// }, +// }, +// }, +// shouldPass: false, +// expErrString: "error: invalid unit storage", +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{ +// CPU: &types.CPU{Units: types.NewResourceValue(1000)}, +// GPU: &types.GPU{Units: types.NewResourceValue(0)}, +// Memory: &types.Memory{Quantity: types.NewResourceValue(validationConfig.MinUnitMemory)}, +// Storage: types.Volumes{}, +// }, +// Count: 1, +// }, +// }, +// }, +// }, +// shouldPass: true, +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{ +// CPU: &types.CPU{Units: types.NewResourceValue(uint64(validationConfig.MaxUnitCPU))}, +// GPU: &types.GPU{Units: types.NewResourceValue(0)}, +// Memory: &types.Memory{Quantity: types.NewResourceValue(validationConfig.MinUnitMemory)}, +// Storage: types.Volumes{}, +// }, +// Count: 
uint32(validationConfig.MaxGroupCPU/uint64(validationConfig.MaxUnitCPU)) + 1, +// }, +// }, +// }, +// }, +// shouldPass: false, +// expErrString: "invalid total CPU", +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{ +// CPU: &types.CPU{Units: types.NewResourceValue(1000)}, +// GPU: &types.GPU{Units: types.NewResourceValue(uint64(validationConfig.MaxUnitGPU))}, +// Memory: &types.Memory{Quantity: types.NewResourceValue(validationConfig.MinUnitMemory)}, +// Storage: types.Volumes{}, +// }, +// Count: uint32(validationConfig.MaxGroupGPU/uint64(validationConfig.MaxUnitGPU)) + 1, +// }, +// }, +// }, +// }, +// shouldPass: false, +// expErrString: "invalid total GPU", +// }, +// { +// rlist: GroupSpec{ +// Name: "test", +// Resources: GroupResources{ +// { +// Resource: types.Resource{ +// ID: 1, +// Units: types.ResourceUnits{ +// CPU: &types.CPU{Units: types.NewResourceValue(uint64(validationConfig.MinUnitCPU))}, +// GPU: &types.GPU{Units: types.NewResourceValue(0)}, +// Memory: &types.Memory{Quantity: types.NewResourceValue(validationConfig.MaxUnitMemory)}, +// Storage: types.Volumes{}, +// }, +// Count: uint32(validationConfig.MaxGroupMemory/validationConfig.MaxUnitMemory) + 1, +// }, +// }, +// }, +// }, +// shouldPass: false, +// expErrString: "invalid total memory", +// }, +// } +// +// for _, test := range tests { +// err := ValidateResourceList(test.rlist) +// if test.shouldPass { +// require.NoError(t, err) +// } else { +// require.Error(t, err) +// if test.expErr != nil { +// require.EqualError(t, err, test.expErr.Error()) +// } else if test.expErrString != "" { +// require.True(t, +// strings.Contains(err.Error(), test.expErrString), +// fmt.Sprintf("invalid error message: expected to contain (%s) != actual(%s)", test.expErrString, err.Error())) +// } else { +// require.Error(t, err) +// } +// } +// } +// } diff --git 
a/go/node/deployment/v1beta4/resourcelimits.go b/go/node/deployment/v1beta4/resourcelimits.go new file mode 100644 index 00000000..63b62926 --- /dev/null +++ b/go/node/deployment/v1beta4/resourcelimits.go @@ -0,0 +1,38 @@ +package v1beta4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type resourceLimits struct { + cpu sdk.Int + gpu sdk.Int + memory sdk.Int + storage []sdk.Int +} + +func newLimits() resourceLimits { + return resourceLimits{ + cpu: sdk.ZeroInt(), + gpu: sdk.ZeroInt(), + memory: sdk.ZeroInt(), + } +} + +func (u *resourceLimits) add(rhs resourceLimits) { + u.cpu = u.cpu.Add(rhs.cpu) + u.gpu = u.gpu.Add(rhs.gpu) + u.memory = u.memory.Add(rhs.memory) + + // u.storage = u.storage.Add(rhs.storage) +} + +func (u *resourceLimits) mul(count uint32) { + u.cpu = u.cpu.MulRaw(int64(count)) + u.gpu = u.gpu.MulRaw(int64(count)) + u.memory = u.memory.MulRaw(int64(count)) + + for i := range u.storage { + u.storage[i] = u.storage[i].MulRaw(int64(count)) + } +} diff --git a/go/node/deployment/v1beta4/resourceunit.go b/go/node/deployment/v1beta4/resourceunit.go new file mode 100644 index 00000000..ee383f60 --- /dev/null +++ b/go/node/deployment/v1beta4/resourceunit.go @@ -0,0 +1,166 @@ +package v1beta4 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + + types "github.com/akash-network/akash-api/go/node/types/resources/v1" +) + +// FullPrice method returns full price of resource +func (r *ResourceUnit) FullPrice() sdk.DecCoin { + return sdk.NewDecCoinFromDec(r.Price.Denom, r.Price.Amount.MulInt64(int64(r.Count))) +} + +func (r *ResourceUnit) Dup() ResourceUnit { + return ResourceUnit{ + Resources: r.Resources.Dup(), + Count: r.Count, + Price: r.GetPrice(), + } +} + +func (r *ResourceUnit) validate() error { + // if r.Count > uint32(validationConfig.MaxUnitCount) || r.Count < uint32(validationConfig.MinUnitCount) { + // return fmt.Errorf("error: invalid unit count (%v > %v > %v fails)", + // validationConfig.MaxUnitCount, r.Count, 
validationConfig.MinUnitCount) + // } + + if err := validateResources(r.Resources); err != nil { + return err + } + + return nil +} + +func (r *ResourceUnit) totalResources() resourceLimits { + limits := newLimits() + + limits.cpu = limits.cpu.Add(r.CPU.Units.Val) + limits.gpu = limits.gpu.Add(r.GPU.Units.Val) + limits.memory = limits.memory.Add(r.Memory.Quantity.Val) + + storage := make([]sdk.Int, 0, len(r.Storage)) + + for _, vol := range r.Storage { + storage = append(storage, vol.Quantity.Val) + } + + // fixme this is not actually sum for storage usecase. + // do we really need sum here? + limits.storage = storage + + limits.mul(r.Count) + + return limits +} + +func (r *ResourceUnit) validatePricing() error { + if !r.GetPrice().IsValid() { + return fmt.Errorf("error: invalid price object") + } + + if r.Price.Amount.GT(sdk.NewDecFromInt(sdk.NewIntFromUint64(validationConfig.Unit.Max.Price))) { + return fmt.Errorf("error: invalid unit price (%v > %v fails)", validationConfig.Unit.Max.Price, r.Price) + } + + return nil +} + +func validateResources(units types.Resources) error { + if units.ID == 0 { + return fmt.Errorf("error: invalid resources ID (> 0 fails)") + } + + if err := validateCPU(units.CPU); err != nil { + return err + } + + if err := validateGPU(units.GPU); err != nil { + return err + } + + if err := validateMemory(units.Memory); err != nil { + return err + } + + if err := validateStorage(units.Storage); err != nil { + return err + } + + return nil +} + +func validateCPU(u *types.CPU) error { + if u == nil { + return fmt.Errorf("error: invalid unit CPU, cannot be nil") + } + + if (u.Units.Value() > uint64(validationConfig.Unit.Max.CPU)) || (u.Units.Value() < uint64(validationConfig.Unit.Min.CPU)) { + return fmt.Errorf("error: invalid unit CPU (%v > %v > %v fails)", + validationConfig.Unit.Max.CPU, u.Units.Value(), validationConfig.Unit.Max.CPU) + } + + if err := u.Attributes.Validate(); err != nil { + return fmt.Errorf("error: invalid CPU attributes: 
%w", err) + } + + return nil +} + +func validateGPU(u *types.GPU) error { + if u == nil { + return fmt.Errorf("error: invalid unit GPU, cannot be nil") + } + + if (u.Units.Value() > uint64(validationConfig.Unit.Max.GPU)) || (u.Units.Value() < uint64(validationConfig.Unit.Min.GPU)) { + return fmt.Errorf("error: invalid unit GPU (%v > %v > %v fails)", + validationConfig.Unit.Max.GPU, u.Units.Value(), validationConfig.Unit.Max.GPU) + } + + if u.Units.Value() == 0 && len(u.Attributes) > 0 { + return fmt.Errorf("error: invalid GPU state. attributes cannot be present if units == 0") + } + + if err := u.Attributes.Validate(); err != nil { + return fmt.Errorf("error: invalid GPU attributes: %w", err) + } + + return nil +} + +func validateMemory(u *types.Memory) error { + if u == nil { + return fmt.Errorf("error: invalid unit memory, cannot be nil") + } + if (u.Quantity.Value() > validationConfig.Unit.Max.Memory) || (u.Quantity.Value() < validationConfig.Unit.Min.Memory) { + return fmt.Errorf("error: invalid unit memory (%v > %v > %v fails)", + validationConfig.Unit.Max.Memory, u.Quantity.Value(), validationConfig.Unit.Max.Memory) + } + + if err := u.Attributes.Validate(); err != nil { + return fmt.Errorf("error: invalid Memory attributes: %w", err) + } + + return nil +} + +func validateStorage(u types.Volumes) error { + if u == nil { + return fmt.Errorf("error: invalid unit storage, cannot be nil") + } + + for i := range u { + if (u[i].Quantity.Value() > validationConfig.Unit.Max.Storage) || (u[i].Quantity.Value() < validationConfig.Unit.Min.Storage) { + return fmt.Errorf("error: invalid unit storage (%v > %v > %v fails)", + validationConfig.Unit.Max.Storage, u[i].Quantity.Value(), validationConfig.Unit.Min.Storage) + } + + if err := u[i].Attributes.Validate(); err != nil { + return fmt.Errorf("error: invalid Storage attributes: %w", err) + } + } + + return nil +} diff --git a/go/node/deployment/v1beta4/resourceunit.pb.go b/go/node/deployment/v1beta4/resourceunit.pb.go new 
file mode 100644 index 00000000..5ab54dc6 --- /dev/null +++ b/go/node/deployment/v1beta4/resourceunit.pb.go @@ -0,0 +1,445 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/resourceunit.proto + +package v1beta4 + +import ( + fmt "fmt" + v1 "github.com/akash-network/akash-api/go/node/types/resources/v1" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ResourceUnit extends Resources and adds Count along with the Price +type ResourceUnit struct { + v1.Resources `protobuf:"bytes,1,opt,name=resource,proto3,embedded=resource" json:"resource" yaml:"resource"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count" yaml:"count"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` +} + +func (m *ResourceUnit) Reset() { *m = ResourceUnit{} } +func (m *ResourceUnit) String() string { return proto.CompactTextString(m) } +func (*ResourceUnit) ProtoMessage() {} +func (*ResourceUnit) Descriptor() ([]byte, []int) { + return fileDescriptor_d48c54f3414ff9e1, []int{0} +} +func (m *ResourceUnit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceUnit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceUnit.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != 
nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceUnit) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceUnit.Merge(m, src) +} +func (m *ResourceUnit) XXX_Size() int { + return m.Size() +} +func (m *ResourceUnit) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceUnit.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceUnit proto.InternalMessageInfo + +func (m *ResourceUnit) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +func (m *ResourceUnit) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +func init() { + proto.RegisterType((*ResourceUnit)(nil), "akash.deployment.v1beta4.ResourceUnit") +} + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/resourceunit.proto", fileDescriptor_d48c54f3414ff9e1) +} + +var fileDescriptor_d48c54f3414ff9e1 = []byte{ + // 349 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x91, 0xb1, 0x4a, 0xc3, 0x40, + 0x1c, 0xc6, 0x73, 0xd5, 0x8a, 0xc4, 0x8a, 0x10, 0x1c, 0x62, 0xd1, 0xa4, 0x64, 0x69, 0x41, 0xbc, + 0x23, 0xea, 0x54, 0x70, 0x89, 0xbe, 0x80, 0x01, 0x11, 0xdc, 0x92, 0xf4, 0x48, 0x43, 0x9b, 0xfb, + 0x87, 0xdc, 0xa5, 0xd2, 0xd1, 0x37, 0xf0, 0x11, 0x7c, 0x9c, 0x8e, 0x1d, 0x9d, 0x82, 0xb4, 0x8b, + 0x74, 0xec, 0x13, 0x48, 0xee, 0xd2, 0x66, 0x71, 0xcb, 0xf7, 0xdd, 0xf7, 0xfd, 0xf8, 0xee, 0xa2, + 0x5f, 0x07, 0x93, 0x80, 0x8f, 0xc9, 0x88, 0x66, 0x53, 0x98, 0xa7, 0x94, 0x09, 0x32, 0x73, 0x43, + 0x2a, 0x82, 0x7b, 0x92, 0x53, 0x0e, 0x45, 0x1e, 0xd1, 0x82, 0x25, 0x02, 0x67, 0x39, 0x08, 0x30, + 0x4c, 0x19, 0xc6, 0x4d, 0x18, 0xd7, 0xe1, 0xee, 0x79, 0x0c, 0x31, 0xc8, 0x10, 0xa9, 0xbe, 0x54, + 0xbe, 0xdb, 0x57, 0xf0, 0x30, 0xe0, 0x74, 0x8f, 0xe3, 0x64, 0xe6, 0x36, 0xa2, 0x0e, 0x5a, 0x11, + 0xf0, 0x14, 0xb8, 0x4a, 0x2a, 0xa6, 0x4b, 0x22, 0x48, 0x98, 0x3a, 0x77, 0x3e, 0x5a, 0x7a, 0xc7, + 0xaf, 0x3b, 0x2f, 0x2c, 0x11, 0x46, 0xac, 0x1f, 0xef, 0x18, 0x26, 0xea, 0xa1, 0xc1, 0xc9, 0xad, + 
0x83, 0xd5, 0xb8, 0x0a, 0x81, 0x1b, 0xfe, 0xcc, 0xc5, 0xbb, 0x22, 0xf7, 0xfa, 0x8b, 0xd2, 0xd6, + 0x96, 0xa5, 0x8d, 0x36, 0xa5, 0xbd, 0xef, 0x6f, 0x4b, 0xfb, 0x6c, 0x1e, 0xa4, 0xd3, 0xa1, 0xb3, + 0x73, 0x1c, 0x7f, 0x7f, 0x68, 0x10, 0xbd, 0x1d, 0x41, 0xc1, 0x84, 0xd9, 0xea, 0xa1, 0xc1, 0xa9, + 0x77, 0xb1, 0x29, 0x6d, 0x65, 0x6c, 0x4b, 0xbb, 0xa3, 0x6a, 0x52, 0x3a, 0xbe, 0xb2, 0x8d, 0x67, + 0xbd, 0x9d, 0xe5, 0x49, 0x44, 0xcd, 0x03, 0x39, 0xeb, 0x12, 0xab, 0xab, 0xa9, 0x5d, 0xf5, 0xd5, + 0xf0, 0x13, 0x8d, 0x1e, 0x21, 0x61, 0xde, 0x55, 0x35, 0xa8, 0x42, 0xca, 0x4a, 0x83, 0x94, 0xd2, + 0xf1, 0x95, 0x3d, 0x3c, 0xfc, 0xfd, 0xb2, 0x91, 0xf7, 0xba, 0x58, 0x59, 0x68, 0xb9, 0xb2, 0xd0, + 0xcf, 0xca, 0x42, 0x9f, 0x6b, 0x4b, 0x5b, 0xae, 0x2d, 0xed, 0x7b, 0x6d, 0x69, 0x6f, 0x0f, 0x71, + 0x22, 0xc6, 0x45, 0x88, 0x23, 0x48, 0x89, 0x7c, 0x84, 0x1b, 0x46, 0xc5, 0x3b, 0xe4, 0x93, 0x5a, + 0x05, 0x59, 0x42, 0x62, 0x20, 0x0c, 0x46, 0xf4, 0x9f, 0x1f, 0x1d, 0x1e, 0xc9, 0x37, 0xbe, 0xfb, + 0x0b, 0x00, 0x00, 0xff, 0xff, 0x67, 0x96, 0x3e, 0x5e, 0x0b, 0x02, 0x00, 0x00, +} + +func (this *ResourceUnit) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceUnit) + if !ok { + that2, ok := that.(ResourceUnit) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Resources.Equal(&that1.Resources) { + return false + } + if this.Count != that1.Count { + return false + } + if !this.Price.Equal(&that1.Price) { + return false + } + return true +} +func (m *ResourceUnit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceUnit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceUnit) MarshalToSizedBuffer(dAtA 
[]byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResourceunit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.Count != 0 { + i = encodeVarintResourceunit(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResourceunit(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintResourceunit(dAtA []byte, offset int, v uint64) int { + offset -= sovResourceunit(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceUnit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resources.Size() + n += 1 + l + sovResourceunit(uint64(l)) + if m.Count != 0 { + n += 1 + sovResourceunit(uint64(m.Count)) + } + l = m.Price.Size() + n += 1 + l + sovResourceunit(uint64(l)) + return n +} + +func sovResourceunit(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozResourceunit(x uint64) (n int) { + return sovResourceunit(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceUnit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourceunit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceUnit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceUnit: illegal tag %d (wire type %d)", fieldNum, wire) + } 
+ switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourceunit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResourceunit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResourceunit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourceunit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourceunit + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResourceunit + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResourceunit + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResourceunit(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResourceunit + } + if (iNdEx + skippy) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResourceunit(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourceunit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourceunit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourceunit + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthResourceunit + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupResourceunit + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthResourceunit + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthResourceunit = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResourceunit = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupResourceunit = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/deployment/v1beta4/resourceunits.go b/go/node/deployment/v1beta4/resourceunits.go new file mode 100644 index 00000000..290ee4c0 --- /dev/null +++ b/go/node/deployment/v1beta4/resourceunits.go @@ -0,0 
+1,82 @@ +package v1beta4 + +import ( + "fmt" + "sort" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +type ResourceUnits []ResourceUnit + +var _ sort.Interface = (*ResourceUnits)(nil) + +func (s ResourceUnits) Len() int { + return len(s) +} + +func (s ResourceUnits) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ResourceUnits) Less(i, j int) bool { + return s[i].ID < s[j].ID +} + +func (s ResourceUnits) Dup() ResourceUnits { + res := make(ResourceUnits, 0, len(s)) + + for _, ru := range s { + res = append(res, ru.Dup()) + } + + return res +} + +func (s ResourceUnits) Validate() error { + // if count := len(s); count > validationConfig.MaxGroupUnits { + // return fmt.Errorf("too many units (%v > %v)", count, validationConfig.MaxGroupUnits) + // } + + ids := make(map[uint32]bool) + for _, res := range s { + if err := res.validate(); err != nil { + return err + } + if res.ID == 0 { + return fmt.Errorf("resources ID must be > 0") + } + + if _, exists := ids[res.ID]; exists { + return fmt.Errorf("duplicate resources ID (%d) within group", res.ID) + } + + ids[res.ID] = true + } + + limits := newLimits() + + for idx := range s { + limits.add(s[idx].totalResources()) + } + + if limits.cpu.GT(sdk.NewIntFromUint64(uint64(validationConfig.Group.Max.CPU))) || limits.cpu.LTE(sdk.ZeroInt()) { + return fmt.Errorf("invalid total CPU (%v > %v > %v fails)", validationConfig.Group.Max.CPU, limits.cpu, 0) + } + + if !limits.gpu.IsZero() && (limits.gpu.GT(sdk.NewIntFromUint64(uint64(validationConfig.Group.Max.GPU))) || limits.gpu.LTE(sdk.ZeroInt())) { + return fmt.Errorf("invalid total GPU (%v > %v > %v fails)", validationConfig.Group.Max.GPU, limits.gpu, 0) + } + + if limits.memory.GT(sdk.NewIntFromUint64(validationConfig.Group.Max.Memory)) || limits.memory.LTE(sdk.ZeroInt()) { + return fmt.Errorf("invalid total memory (%v > %v > %v fails)", validationConfig.Group.Max.Memory, limits.memory, 0) + } + + for i := range limits.storage { + if 
limits.storage[i].GT(sdk.NewIntFromUint64(validationConfig.Group.Max.Storage)) || limits.storage[i].LTE(sdk.ZeroInt()) { + return fmt.Errorf("invalid total storage (%v > %v > %v fails)", validationConfig.Group.Max.Storage, limits.storage, 0) + } + } + + return nil +} diff --git a/go/node/deployment/v1beta4/service.pb.go b/go/node/deployment/v1beta4/service.pb.go new file mode 100644 index 00000000..a5641122 --- /dev/null +++ b/go/node/deployment/v1beta4/service.pb.go @@ -0,0 +1,365 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/deployment/v1beta4/service.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("akash/deployment/v1beta4/service.proto", fileDescriptor_2013a754c1800268) +} + +var fileDescriptor_2013a754c1800268 = []byte{ + // 325 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0xd3, 0x31, 0x4e, 0xeb, 0x30, + 0x18, 0xc0, 0xf1, 0x56, 0x4f, 0xea, 0xe0, 0xe5, 0x95, 0x4c, 0xc8, 0x83, 0x47, 0xba, 0xb4, 0xb6, + 0x80, 0xc2, 0xc6, 0x42, 0x2b, 0x31, 0x55, 0x42, 0x20, 0x84, 0xc4, 0xe6, 0xb6, 0x1f, 0x6e, 0xd4, + 0x36, 0xb6, 0x6c, 0xa7, 0x14, 0x71, 0x09, 0x2e, 0xc1, 0x5d, 0x18, 0x3b, 0x32, 0xa2, 0xe4, 0x22, + 0x88, 0x50, 0xe2, 0x10, 0x48, 0x93, 0x8c, 0x49, 0x7e, 0xf9, 0xfe, 0x71, 0x64, 0xa3, 0x03, 0x3e, + 0xe7, 0x66, 0xc6, 0xa6, 0xa0, 0x16, 0xf2, 0x71, 0x09, 0x81, 0x65, 0xab, 0xc3, 0x31, 0x58, 0xde, + 0x67, 0x06, 0xf4, 0xca, 0x9f, 0x00, 0x55, 0x5a, 0x5a, 0xe9, 0xed, 0x27, 0x8e, 0x3a, 0x47, 0xb7, + 0x0e, 0x77, 0x0b, 0x27, 0xb8, 0x5b, 0x4b, 0x23, 0xbe, 0xe6, 0xe0, 0x4e, 0xa1, 0x16, 0x5a, 0x86, + 0x2a, 0x85, 0x47, 0x2f, 0x2d, 0xf4, 0x6f, 0x64, 0x84, 0xb7, 0x46, 0xed, 0x81, 0x06, 0x6e, 0x61, + 0x98, 0xbe, 0xe2, 0xf5, 0x68, 0xd1, 0xd7, 0xd0, 0x91, 0x11, 0x79, 0x8e, 0x4f, 0x6a, 0xf1, 0x2b, + 0x30, 0x4a, 0x06, 0x06, 0xbc, 0x27, 0xb4, 0x37, 0x04, 0x25, 0x8d, 0x6f, 0x33, 0x69, 0xba, 0x73, + 0xd6, 0x2f, 0x8f, 0x4f, 0xeb, 0xf9, 0x34, 0xbe, 0x46, 0xed, 0x1b, 0x35, 0xad, 0xb3, 0xec, 0x3c, + 0x2f, 0x59, 0x76, 0x9e, 0xa7, 0xe5, 0x10, 0xfd, 0x1f, 0x2c, 0xa4, 0xc9, 0x86, 0xbb, 0xbb, 0x7f, + 0xe0, 0x4f, 0x8d, 0xfb, 0x75, 0x74, 0x9a, 0xbd, 0x47, 0x28, 0x79, 0x74, 0xf1, 0xb9, 0x0d, 0xbc, + 0x4e, 0xf9, 0x8c, 0x04, 0x62, 0x56, 0x11, 0x66, 0x3b, 0x97, 0x3c, 0xac, 0xd6, 0x71, 0xb0, 0xa4, + 0xe3, 0x60, 0xb6, 0x73, 0x6d, 0xb9, 0xb6, 0x55, 0x3a, 0x0e, 0x96, 0x74, 0x1c, 0xfc, 0xee, 0x9c, + 0xdf, 0xbe, 0x46, 0xa4, 0xb9, 0x89, 0x48, 0xf3, 0x3d, 0x22, 0xcd, 0xe7, 0x98, 0x34, 0x36, 0x31, + 0x69, 0xbc, 0xc5, 0xa4, 0x71, 0x77, 
0x26, 0x7c, 0x3b, 0x0b, 0xc7, 0x74, 0x22, 0x97, 0x2c, 0x19, + 0xda, 0x0b, 0xc0, 0x3e, 0x48, 0x3d, 0xdf, 0x5e, 0x71, 0xe5, 0x33, 0x21, 0x59, 0x20, 0xa7, 0xf0, + 0xc7, 0x79, 0x1c, 0xb7, 0x92, 0x73, 0x78, 0xfc, 0x11, 0x00, 0x00, 0xff, 0xff, 0x99, 0x1b, 0x7d, + 0x02, 0x22, 0x04, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // CreateDeployment defines a method to create new deployment given proper inputs. + CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) + // DepositDeployment deposits more funds into the deployment account + DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) + // UpdateDeployment defines a method to update a deployment given proper inputs. + UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) + // CloseDeployment defines a method to close a deployment given proper inputs. + CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) + // CloseGroup defines a method to close a group of a deployment given proper inputs. + CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) + // PauseGroup defines a method to close a group of a deployment given proper inputs. 
+ PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) + // StartGroup defines a method to close a group of a deployment given proper inputs. + StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateDeployment(ctx context.Context, in *MsgCreateDeployment, opts ...grpc.CallOption) (*MsgCreateDeploymentResponse, error) { + out := new(MsgCreateDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/CreateDeployment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DepositDeployment(ctx context.Context, in *MsgDepositDeployment, opts ...grpc.CallOption) (*MsgDepositDeploymentResponse, error) { + out := new(MsgDepositDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/DepositDeployment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateDeployment(ctx context.Context, in *MsgUpdateDeployment, opts ...grpc.CallOption) (*MsgUpdateDeploymentResponse, error) { + out := new(MsgUpdateDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/UpdateDeployment", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseDeployment(ctx context.Context, in *MsgCloseDeployment, opts ...grpc.CallOption) (*MsgCloseDeploymentResponse, error) { + out := new(MsgCloseDeploymentResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/CloseDeployment", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseGroup(ctx context.Context, in *MsgCloseGroup, opts ...grpc.CallOption) (*MsgCloseGroupResponse, error) { + out := new(MsgCloseGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/CloseGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) PauseGroup(ctx context.Context, in *MsgPauseGroup, opts ...grpc.CallOption) (*MsgPauseGroupResponse, error) { + out := new(MsgPauseGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/PauseGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) StartGroup(ctx context.Context, in *MsgStartGroup, opts ...grpc.CallOption) (*MsgStartGroupResponse, error) { + out := new(MsgStartGroupResponse) + err := c.cc.Invoke(ctx, "/akash.deployment.v1beta4.Msg/StartGroup", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateDeployment defines a method to create new deployment given proper inputs. + CreateDeployment(context.Context, *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) + // DepositDeployment deposits more funds into the deployment account + DepositDeployment(context.Context, *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) + // UpdateDeployment defines a method to update a deployment given proper inputs. + UpdateDeployment(context.Context, *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) + // CloseDeployment defines a method to close a deployment given proper inputs. + CloseDeployment(context.Context, *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) + // CloseGroup defines a method to close a group of a deployment given proper inputs. 
+ CloseGroup(context.Context, *MsgCloseGroup) (*MsgCloseGroupResponse, error) + // PauseGroup defines a method to close a group of a deployment given proper inputs. + PauseGroup(context.Context, *MsgPauseGroup) (*MsgPauseGroupResponse, error) + // StartGroup defines a method to close a group of a deployment given proper inputs. + StartGroup(context.Context, *MsgStartGroup) (*MsgStartGroupResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateDeployment(ctx context.Context, req *MsgCreateDeployment) (*MsgCreateDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateDeployment not implemented") +} +func (*UnimplementedMsgServer) DepositDeployment(ctx context.Context, req *MsgDepositDeployment) (*MsgDepositDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DepositDeployment not implemented") +} +func (*UnimplementedMsgServer) UpdateDeployment(ctx context.Context, req *MsgUpdateDeployment) (*MsgUpdateDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateDeployment not implemented") +} +func (*UnimplementedMsgServer) CloseDeployment(ctx context.Context, req *MsgCloseDeployment) (*MsgCloseDeploymentResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseDeployment not implemented") +} +func (*UnimplementedMsgServer) CloseGroup(ctx context.Context, req *MsgCloseGroup) (*MsgCloseGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseGroup not implemented") +} +func (*UnimplementedMsgServer) PauseGroup(ctx context.Context, req *MsgPauseGroup) (*MsgPauseGroupResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method PauseGroup not implemented") +} +func (*UnimplementedMsgServer) StartGroup(ctx context.Context, req *MsgStartGroup) (*MsgStartGroupResponse, error) { 
+ return nil, status.Errorf(codes.Unimplemented, "method StartGroup not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/CreateDeployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateDeployment(ctx, req.(*MsgCreateDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DepositDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDepositDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DepositDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/DepositDeployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DepositDeployment(ctx, req.(*MsgDepositDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/UpdateDeployment", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).UpdateDeployment(ctx, req.(*MsgUpdateDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseDeployment_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseDeployment) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseDeployment(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/CloseDeployment", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseDeployment(ctx, req.(*MsgCloseDeployment)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseGroup) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/CloseGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseGroup(ctx, req.(*MsgCloseGroup)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_PauseGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgPauseGroup) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).PauseGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/PauseGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(MsgServer).PauseGroup(ctx, req.(*MsgPauseGroup)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_StartGroup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgStartGroup) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).StartGroup(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.deployment.v1beta4.Msg/StartGroup", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).StartGroup(ctx, req.(*MsgStartGroup)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.deployment.v1beta4.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateDeployment", + Handler: _Msg_CreateDeployment_Handler, + }, + { + MethodName: "DepositDeployment", + Handler: _Msg_DepositDeployment_Handler, + }, + { + MethodName: "UpdateDeployment", + Handler: _Msg_UpdateDeployment_Handler, + }, + { + MethodName: "CloseDeployment", + Handler: _Msg_CloseDeployment_Handler, + }, + { + MethodName: "CloseGroup", + Handler: _Msg_CloseGroup_Handler, + }, + { + MethodName: "PauseGroup", + Handler: _Msg_PauseGroup_Handler, + }, + { + MethodName: "StartGroup", + Handler: _Msg_StartGroup_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/deployment/v1beta4/service.proto", +} diff --git a/go/node/deployment/v1beta4/types.go b/go/node/deployment/v1beta4/types.go new file mode 100644 index 00000000..1f9e2683 --- /dev/null +++ b/go/node/deployment/v1beta4/types.go @@ -0,0 +1,122 @@ +package v1beta4 + +import ( + "bytes" + + types "github.com/akash-network/akash-api/go/node/types/resources/v1" +) + +type attributesMatching map[string]types.Attributes + +const ( + // ManifestVersionLength is the length of manifest version + 
ManifestVersionLength = 32 + + // DefaultOrderBiddingDuration is the default time limit for an Order being active. + // After the duration, the Order is automatically closed. + // ( 24(hr) * 3600(seconds per hour) ) / 7s-Block + DefaultOrderBiddingDuration = int64(12342) + + // MaxBiddingDuration is roughly 30 days of block height + MaxBiddingDuration = DefaultOrderBiddingDuration * int64(30) +) + +// ID method returns DeploymentID details of specific deployment +func (obj Deployment) ID() DeploymentID { + return obj.DeploymentID +} + +// MatchAttributes method compares provided attributes with specific group attributes +func (g *GroupSpec) MatchAttributes(attr types.Attributes) bool { + return types.AttributesSubsetOf(g.Requirements.Attributes, attr) +} + +// ID method returns GroupID details of specific group +func (g Group) ID() GroupID { + return g.GroupID +} + +// ValidateClosable provides error response if group is already closed, +// and thus should not be closed again, else nil. 
+func (g Group) ValidateClosable() error { + switch g.State { + case GroupClosed: + return ErrGroupClosed + default: + return nil + } +} + +// ValidatePausable provides error response if group is not pausable +func (g Group) ValidatePausable() error { + switch g.State { + case GroupClosed: + return ErrGroupClosed + case GroupPaused: + return ErrGroupPaused + default: + return nil + } +} + +// ValidateStartable provides error response if group is not startable +func (g Group) ValidateStartable() error { + switch g.State { + case GroupClosed: + return ErrGroupClosed + case GroupOpen: + return ErrGroupOpen + default: + return nil + } +} + +// GetName method returns group name +func (g Group) GetName() string { + return g.GroupSpec.Name +} + +// GetResourceUnits method returns resources list in group +func (g Group) GetResourceUnits() ResourceUnits { + return g.GroupSpec.Resources +} + +// DeploymentResponses is a collection of DeploymentResponse +type DeploymentResponses []QueryDeploymentResponse + +func (ds DeploymentResponses) String() string { + var buf bytes.Buffer + + const sep = "\n\n" + + for _, d := range ds { + buf.WriteString(d.String()) + buf.WriteString(sep) + } + + if len(ds) > 0 { + buf.Truncate(buf.Len() - len(sep)) + } + + return buf.String() +} + +// Accept returns whether deployment filters valid or not +func (filters DeploymentFilters) Accept(obj Deployment, stateVal Deployment_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != obj.DeploymentID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.DeploymentID.DSeq { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} diff --git a/go/node/deployment/v1beta4/types_test.go b/go/node/deployment/v1beta4/types_test.go new file mode 100644 index 00000000..d4217039 --- /dev/null +++ b/go/node/deployment/v1beta4/types_test.go @@ -0,0 +1,461 @@ 
+package v1beta4_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + sdk "github.com/cosmos/cosmos-sdk/types" + abci "github.com/tendermint/tendermint/abci/types" + + atypes "github.com/akash-network/akash-api/go/node/audit/v1beta4" + types "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + akashtypes "github.com/akash-network/akash-api/go/node/types/resources/v1" + "github.com/akash-network/akash-api/go/sdkutil" + tutil "github.com/akash-network/akash-api/go/testutil" + testutil "github.com/akash-network/akash-api/go/testutil/v1beta3" +) + +type gStateTest struct { + state types.Group_State + expValidateClosable error +} + +func TestGroupState(t *testing.T) { + tests := []gStateTest{ + { + state: types.GroupOpen, + }, + { + state: types.GroupOpen, + }, + { + state: types.GroupInsufficientFunds, + }, + { + state: types.GroupClosed, + expValidateClosable: types.ErrGroupClosed, + }, + { + state: types.Group_State(99), + }, + } + + for _, test := range tests { + group := types.Group{ + GroupID: testutil.GroupID(t), + State: test.state, + } + + assert.Equal(t, group.ValidateClosable(), test.expValidateClosable, group.State) + } +} + +func TestDeploymentVersionAttributeLifecycle(t *testing.T) { + d := testutil.Deployment(t) + + t.Run("deployment created", func(t *testing.T) { + edc := types.NewEventDeploymentCreated(d.ID(), d.Version) + sdkEvent := edc.ToSDKEvent() + strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) + + ev, err := sdkutil.ParseEvent(strEvent) + require.NoError(t, err) + + versionString, err := types.ParseEVDeploymentVersion(ev.Attributes) + require.NoError(t, err) + assert.Equal(t, d.Version, versionString) + }) + + t.Run("deployment updated", func(t *testing.T) { + edu := types.NewEventDeploymentUpdated(d.ID(), d.Version) + + sdkEvent := edu.ToSDKEvent() + strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) + + ev, err := sdkutil.ParseEvent(strEvent) + require.NoError(t, 
err) + + versionString, err := types.ParseEVDeploymentVersion(ev.Attributes) + require.NoError(t, err) + assert.Equal(t, d.Version, versionString) + }) + + t.Run("deployment closed error", func(t *testing.T) { + edc := types.NewEventDeploymentClosed(d.ID()) + + sdkEvent := edc.ToSDKEvent() + strEvent := sdk.StringifyEvent(abci.Event(sdkEvent)) + + ev, err := sdkutil.ParseEvent(strEvent) + require.NoError(t, err) + + versionString, err := types.ParseEVDeploymentVersion(ev.Attributes) + require.Error(t, err) + assert.NotEqual(t, d.Version, versionString) + }) +} + +func TestGroupSpecValidation(t *testing.T) { + tests := []struct { + desc string + gspec types.GroupSpec + expErr error + }{ + { + desc: "groupspec requires name", + gspec: types.GroupSpec{ + Name: "", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + }, + expErr: types.ErrInvalidGroups, + }, + { + desc: "groupspec valid", + gspec: types.GroupSpec{ + Name: "hihi", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + }, + expErr: nil, + }, + } + + for _, test := range tests { + err := test.gspec.ValidateBasic() + if test.expErr != nil { + assert.Error(t, err, test.desc) + continue + } + assert.Equal(t, test.expErr, err, test.desc) + } +} + +func TestGroupPlacementRequirementsNoSigners(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + providerAttr := []atypes.Provider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + } + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupPlacementRequirementsSignerAllOf(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor1") + 
group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor2") + + providerAttr := []atypes.Provider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + } + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.Provider{ + Owner: "test", + Auditor: "auditor2", + Attributes: group.Requirements.Attributes, + }) + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.Provider{ + Owner: "test", + Auditor: "auditor1", + Attributes: group.Requirements.Attributes, + }) + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupPlacementRequirementsSignerAnyOf(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor1") + + providerAttr := []atypes.Provider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + } + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.Provider{ + Owner: "test", + Auditor: "auditor2", + Attributes: group.Requirements.Attributes, + }) + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.Provider{ + Owner: "test", + Auditor: "auditor1", + Attributes: group.Requirements.Attributes, + }) + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupPlacementRequirementsSignerAllOfAnyOf(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor1") + group.Requirements.SignedBy.AllOf = append(group.Requirements.SignedBy.AllOf, "auditor2") + + 
group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor3") + group.Requirements.SignedBy.AnyOf = append(group.Requirements.SignedBy.AnyOf, "auditor4") + + providerAttr := []atypes.Provider{ + { + Owner: "test", + Attributes: group.Requirements.Attributes, + }, + { + Owner: "test", + Auditor: "auditor3", + Attributes: group.Requirements.Attributes, + }, + { + Owner: "test", + Auditor: "auditor4", + Attributes: group.Requirements.Attributes, + }, + } + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.Provider{ + Owner: "test", + Auditor: "auditor2", + Attributes: group.Requirements.Attributes, + }) + + require.False(t, group.MatchRequirements(providerAttr)) + + providerAttr = append(providerAttr, atypes.Provider{ + Owner: "test", + Auditor: "auditor1", + Attributes: group.Requirements.Attributes, + }) + + require.True(t, group.MatchRequirements(providerAttr)) +} + +func TestGroupSpec_MatchResourcesAttributes(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Resources[0].Storage[0].Attributes = akashtypes.Attributes{ + { + Key: "persistent", + Value: "true", + }, + { + Key: "class", + Value: "default", + }, + } + + provAttributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "default", + }, + { + Key: "capabilities/storage/1/persistent", + Value: "true", + }, + } + + prov2Attributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "default", + }, + } + + prov3Attributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "beta2", + }, + } + + match := group.MatchResourcesRequirements(provAttributes) + require.True(t, match) + match = group.MatchResourcesRequirements(prov2Attributes) + require.False(t, match) + match = group.MatchResourcesRequirements(prov3Attributes) + 
require.False(t, match) +} + +func TestGroupSpec_MatchGPUAttributes(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Resources[0].GPU.Attributes = akashtypes.Attributes{ + { + Key: "vendor/nvidia/model/a100", + Value: "true", + }, + } + + provAttributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "default", + }, + { + Key: "capabilities/storage/1/persistent", + Value: "true", + }, + { + Key: "capabilities/gpu/vendor/nvidia/model/a100", + Value: "true", + }, + } + + prov2Attributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "default", + }, + } + + prov3Attributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "beta2", + }, + } + + match := group.MatchResourcesRequirements(provAttributes) + require.True(t, match) + match = group.MatchResourcesRequirements(prov2Attributes) + require.False(t, match) + match = group.MatchResourcesRequirements(prov3Attributes) + require.False(t, match) +} + +func TestGroupSpec_MatchGPUAttributesWildcard(t *testing.T) { + group := types.GroupSpec{ + Name: "spec", + Requirements: testutil.PlacementRequirements(t), + Resources: testutil.ResourcesList(t, 1), + } + + group.Resources[0].GPU.Attributes = akashtypes.Attributes{ + { + Key: "vendor/nvidia/model/*", + Value: "true", + }, + } + + provAttributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "default", + }, + { + Key: "capabilities/storage/1/persistent", + Value: "true", + }, + { + Key: "capabilities/gpu/vendor/nvidia/model/a100", + Value: "true", + }, + } + + prov2Attributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "default", + }, + } + + prov3Attributes := akashtypes.Attributes{ + { + Key: "capabilities/storage/1/class", + Value: "beta2", + }, + } + + match := 
group.MatchResourcesRequirements(provAttributes) + require.True(t, match) + match = group.MatchResourcesRequirements(prov2Attributes) + require.False(t, match) + match = group.MatchResourcesRequirements(prov3Attributes) + require.False(t, match) +} + +func TestDepositDeploymentAuthorization_Accept(t *testing.T) { + limit := sdk.NewInt64Coin(tutil.CoinDenom, 333) + dda := types.NewDepositDeploymentAuthorization(limit) + + // Send the wrong type of message, expect an error + var msg sdk.Msg + response, err := dda.Accept(sdk.Context{}, msg) + require.Error(t, err) + require.Contains(t, err.Error(), "invalid type") + require.Zero(t, response) + + // Try to deposit too much coin, expect an error + msg = types.NewMsgDepositDeployment(testutil.DeploymentID(t), limit.Add(sdk.NewInt64Coin(tutil.CoinDenom, 1)), testutil.AccAddress(t).String()) + response, err = dda.Accept(sdk.Context{}, msg) + require.Error(t, err) + require.Contains(t, err.Error(), "requested amount is more than spend limit") + require.Zero(t, response) + + // Deposit 1 less than the limit, expect an updated deposit + msg = types.NewMsgDepositDeployment(testutil.DeploymentID(t), limit.Sub(sdk.NewInt64Coin(tutil.CoinDenom, 1)), testutil.AccAddress(t).String()) + response, err = dda.Accept(sdk.Context{}, msg) + require.NoError(t, err) + require.True(t, response.Accept) + require.False(t, response.Delete) + + ok := false + dda, ok = response.Updated.(*types.DepositDeploymentAuthorization) + require.True(t, ok) + + // Deposit the limit (now 1), expect that it is not to be deleted + msg = types.NewMsgDepositDeployment(testutil.DeploymentID(t), sdk.NewInt64Coin(tutil.CoinDenom, 1), testutil.AccAddress(t).String()) + response, err = dda.Accept(sdk.Context{}, msg) + require.NoError(t, err) + require.True(t, response.Accept) + require.False(t, response.Delete) +} diff --git a/go/node/deployment/v1beta4/validation_config.go b/go/node/deployment/v1beta4/validation_config.go new file mode 100644 index 
00000000..a1b97d71 --- /dev/null +++ b/go/node/deployment/v1beta4/validation_config.go @@ -0,0 +1,118 @@ +package v1beta4 + +import ( + "github.com/akash-network/akash-api/go/node/types/unit" +) + +const ( + maxUnitCPU = 384 * 1000 // max amount of CPU units a single replica of a service can request + maxUnitGPU = 24 + maxUnitMemory = 2 * unit.Ti + maxUnitStorage = 32 * unit.Ti + maxUnitCount = 50 // max amount of service replicas allowed + maxUnitPrice = 10000000 // 10akt + maxGroupCount = 20 // max amount of groups allowed per deployment + maxGroupUnits = 20 +) + +// This is the validation configuration that acts as a hard limit +// on what the network accepts for deployments. This is never changed +// and is the same across all members of the network + +type Limits struct { + Memory uint64 + Storage uint64 + Price uint64 + CPU uint + GPU uint + Count uint +} + +type UnitLimits struct { + Max Limits + Min Limits +} + +type GroupLimit struct { + Limits + Units uint32 +} + +type GroupLimits struct { + Max GroupLimit +} + +type ValidationConfig struct { + Unit UnitLimits + Group GroupLimits + + // // MaxUnitCPU is the maximum number of milli (1/1000) cpu units a single instance may take + // MaxUnitCPU uint + // MaxUnitGPU uint + // // MaxUnitMemory is the maximum number of bytes of memory that a unit can consume + // MaxUnitMemory uint64 + // // MaxUnitStorage is the maximum number of bytes of storage that a unit can consume + // MaxUnitStorage uint64 + // // MaxUnitCount is the maximum number of replicas of a service + // MaxUnitCount uint + // // MaxUnitPrice is the maximum price that a unit can have + // MaxUnitPrice uint64 + // + // MinUnitCPU uint + // MinUnitGPU uint + // MinUnitMemory uint64 + // MinUnitStorage uint64 + // MinUnitCount uint + // + // // MaxGroupCount is the maximum number of groups allowed per deployment + // MaxGroupCount int + // // MaxGroupUnits is the maximum number of services per group + // MaxGroupUnits int + // + // // MaxGroupCPU is the maximum total amount of CPU 
requested per group + // MaxGroupCPU uint64 + // // MaxGroupGPU is the maximum total amount of GPU requested per group + // MaxGroupGPU uint64 + // // MaxGroupMemory is the maximum total amount of memory requested per group + // MaxGroupMemory uint64 + // // MaxGroupStorage is the maximum total amount of storage requested per group + // MaxGroupStorage uint64 +} + +var validationConfig = ValidationConfig{ + Unit: UnitLimits{ + Max: Limits{ + Memory: maxUnitMemory, + Storage: maxUnitStorage, + CPU: maxUnitCPU, + GPU: maxUnitGPU, + Count: maxUnitCount, + Price: maxUnitPrice, + }, + Min: Limits{ + Memory: unit.Mi, + Storage: 5 * unit.Mi, + CPU: 10, + GPU: 0, + Count: 1, + Price: 0, + }, + }, + Group: GroupLimits{ + Max: GroupLimit{ + Limits: Limits{ + Memory: maxUnitMemory * maxUnitCount, + Storage: maxUnitStorage * maxUnitCount, + CPU: maxUnitCPU * maxUnitCount, + GPU: maxUnitGPU * maxUnitCount, + Count: maxGroupCount, + Price: 0, + }, + Units: maxGroupUnits, + }, + }, +} + +func GetValidationConfig() ValidationConfig { + return validationConfig +} diff --git a/go/node/market/v1beta4/errors.go b/go/node/market/v1beta4/errors.go index 21cd9950..ae255f13 100644 --- a/go/node/market/v1beta4/errors.go +++ b/go/node/market/v1beta4/errors.go @@ -1,107 +1,72 @@ package v1beta4 import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errCodeEmptyProvider uint32 = iota + 1 - errCodeSameAccount - errCodeInternal - errCodeOverOrder - errCodeAttributeMismatch - errCodeUnknownBid - errCodeUnknownLease - errCodeUnknownLeaseForOrder - errCodeUnknownOrderForBid - errCodeLeaseNotActive - errCodeBidNotActive - errCodeBidNotOpen - errCodeOrderNotOpen - errCodeNoLeaseForOrder - errCodeOrderNotFound - errCodeGroupNotFound - errCodeGroupNotOpen - errCodeBidNotFound - errCodeBidZeroPrice - errCodeLeaseNotFound - errCodeBidExists - errCodeInvalidPrice - errCodeOrderActive - errCodeOrderClosed - errCodeOrderExists - errCodeOrderDurationExceeded - errCodeOrderTooEarly 
- errInvalidDeposit - errInvalidParam - errUnknownProvider - errInvalidBid - errCodeCapabilitiesMismatch + "errors" ) var ( // ErrEmptyProvider is the error when provider is empty - ErrEmptyProvider = sdkerrors.Register(ModuleName, errCodeEmptyProvider, "empty provider") + ErrEmptyProvider = errors.New("empty provider") // ErrSameAccount is the error when owner and provider are the same account - ErrSameAccount = sdkerrors.Register(ModuleName, errCodeSameAccount, "owner and provider are the same account") + ErrSameAccount = errors.New("owner and provider are the same account") // ErrInternal is the error for internal error - ErrInternal = sdkerrors.Register(ModuleName, errCodeInternal, "internal error") + ErrInternal = errors.New("internal error") // ErrBidOverOrder is the error when bid price is above max order price - ErrBidOverOrder = sdkerrors.Register(ModuleName, errCodeOverOrder, "bid price above max order price") + ErrBidOverOrder = errors.New("bid price above max order price") // ErrAttributeMismatch is the error for attribute mismatch - ErrAttributeMismatch = sdkerrors.Register(ModuleName, errCodeAttributeMismatch, "attribute mismatch") + ErrAttributeMismatch = errors.New("attribute mismatch") // ErrCapabilitiesMismatch is the error for capabilities mismatch - ErrCapabilitiesMismatch = sdkerrors.Register(ModuleName, errCodeCapabilitiesMismatch, "capabilities mismatch") + ErrCapabilitiesMismatch = errors.New("capabilities mismatch") // ErrUnknownBid is the error for unknown bid - ErrUnknownBid = sdkerrors.Register(ModuleName, errCodeUnknownBid, "unknown bid") + ErrUnknownBid = errors.New("unknown bid") // ErrUnknownLease is the error for unknown bid - ErrUnknownLease = sdkerrors.Register(ModuleName, errCodeUnknownLease, "unknown lease") + ErrUnknownLease = errors.New("unknown lease") // ErrUnknownLeaseForBid is the error when lease is unknown for bid - ErrUnknownLeaseForBid = sdkerrors.Register(ModuleName, errCodeUnknownLeaseForOrder, "unknown lease for 
bid") + ErrUnknownLeaseForBid = errors.New("unknown lease for bid") // ErrUnknownOrderForBid is the error when order is unknown for bid - ErrUnknownOrderForBid = sdkerrors.Register(ModuleName, errCodeUnknownOrderForBid, "unknown order for bid") + ErrUnknownOrderForBid = errors.New("unknown order for bid") // ErrLeaseNotActive is the error when lease is not active - ErrLeaseNotActive = sdkerrors.Register(ModuleName, errCodeLeaseNotActive, "lease not active") + ErrLeaseNotActive = errors.New("leas`e not active") // ErrBidNotActive is the error when bid is not matched - ErrBidNotActive = sdkerrors.Register(ModuleName, errCodeBidNotActive, "bid not active") + ErrBidNotActive = errors.New("bid not active") // ErrBidNotOpen is the error when bid is not matched - ErrBidNotOpen = sdkerrors.Register(ModuleName, errCodeBidNotOpen, "bid not open") + ErrBidNotOpen = errors.New("bid not open") // ErrNoLeaseForOrder is the error when there is no lease for order - ErrNoLeaseForOrder = sdkerrors.Register(ModuleName, errCodeNoLeaseForOrder, "no lease for order") + ErrNoLeaseForOrder = errors.New("no lease for order") // ErrOrderNotFound order not found - ErrOrderNotFound = sdkerrors.Register(ModuleName, errCodeOrderNotFound, "invalid order: order not found") + ErrOrderNotFound = errors.New("invalid order: order not found") // ErrGroupNotFound order not found - ErrGroupNotFound = sdkerrors.Register(ModuleName, errCodeGroupNotFound, "order not found") + ErrGroupNotFound = errors.New("order not found") // ErrGroupNotOpen order not found - ErrGroupNotOpen = sdkerrors.Register(ModuleName, errCodeGroupNotOpen, "order not open") + ErrGroupNotOpen = errors.New("order not open") // ErrOrderNotOpen order not found - ErrOrderNotOpen = sdkerrors.Register(ModuleName, errCodeOrderNotOpen, "bid: order not open") + ErrOrderNotOpen = errors.New("bid: order not open") // ErrBidNotFound bid not found - ErrBidNotFound = sdkerrors.Register(ModuleName, errCodeBidNotFound, "invalid bid: bid not found") + 
ErrBidNotFound = errors.New("invalid bid: bid not found") // ErrBidZeroPrice zero price - ErrBidZeroPrice = sdkerrors.Register(ModuleName, errCodeBidZeroPrice, "invalid bid: zero price") + ErrBidZeroPrice = errors.New("invalid bid: zero price") // ErrLeaseNotFound lease not found - ErrLeaseNotFound = sdkerrors.Register(ModuleName, errCodeLeaseNotFound, "invalid lease: lease not found") + ErrLeaseNotFound = errors.New("invalid lease: lease not found") // ErrBidExists bid exists - ErrBidExists = sdkerrors.Register(ModuleName, errCodeBidExists, "invalid bid: bid exists from provider") + ErrBidExists = errors.New("invalid bid: bid exists from provider") // ErrBidInvalidPrice bid invalid price - ErrBidInvalidPrice = sdkerrors.Register(ModuleName, errCodeInvalidPrice, "bid price is invalid") + ErrBidInvalidPrice = errors.New("bid price is invalid") // ErrOrderActive order active - ErrOrderActive = sdkerrors.New(ModuleName, errCodeOrderActive, "order active") + ErrOrderActive = errors.New("order active") // ErrOrderClosed order closed - ErrOrderClosed = sdkerrors.New(ModuleName, errCodeOrderClosed, "order closed") + ErrOrderClosed = errors.New("order closed") // ErrOrderExists indicates a new order was proposed overwrite the existing store key - ErrOrderExists = sdkerrors.New(ModuleName, errCodeOrderExists, "order already exists in store") + ErrOrderExists = errors.New("order already exists in store") // ErrOrderTooEarly to match bid - ErrOrderTooEarly = sdkerrors.New(ModuleName, errCodeOrderTooEarly, "order: chain height to low for bidding") + ErrOrderTooEarly = errors.New("order: chain height to low for bidding") // ErrOrderDurationExceeded order should be closed - ErrOrderDurationExceeded = sdkerrors.New(ModuleName, errCodeOrderDurationExceeded, "order duration has exceeded the bidding duration") + ErrOrderDurationExceeded = errors.New("order duration has exceeded the bidding duration") // ErrInvalidDeposit indicates an invalid deposit - ErrInvalidDeposit = 
sdkerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") + ErrInvalidDeposit = errors.New("Deposit invalid") // ErrInvalidParam indicates an invalid chain parameter - ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") + ErrInvalidParam = errors.New("parameter invalid") // ErrUnknownProvider indicates an invalid chain parameter - ErrUnknownProvider = sdkerrors.Register(ModuleName, errUnknownProvider, "unknown provider") + ErrUnknownProvider = errors.New("unknown provider") // ErrInvalidBid indicates an invalid chain parameter - ErrInvalidBid = sdkerrors.Register(ModuleName, errInvalidBid, "unknown provider") + ErrInvalidBid = errors.New("unknown provider") ) diff --git a/go/node/market/v1beta5/bid.go b/go/node/market/v1beta5/bid.go new file mode 100644 index 00000000..ce83b154 --- /dev/null +++ b/go/node/market/v1beta5/bid.go @@ -0,0 +1,82 @@ +package v1beta5 + +import ( + "sort" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" +) + +type ResourcesOffer []ResourceOffer + +var _ sort.Interface = (*ResourcesOffer)(nil) + +func (s ResourcesOffer) MatchGSpec(gspec dtypes.GroupSpec) bool { + if len(s) == 0 { + return true + } + + ru := make(map[uint32]*dtypes.ResourceUnit) + + for idx := range gspec.Resources { + ru[gspec.Resources[idx].ID] = &gspec.Resources[idx] + } + + for _, ro := range s { + res, exists := ru[ro.Resources.ID] + if !exists { + return false + } + + ru[ro.Resources.ID] = nil + + if res.Count != ro.Count { + return false + } + + // TODO @troian check resources boundaries + } + + return true +} + +func (r *ResourceOffer) Dup() ResourceOffer { + return ResourceOffer{ + Resources: r.Resources.Dup(), + Count: r.Count, + } +} + +func (s ResourcesOffer) Len() int { + return len(s) +} + +func (s ResourcesOffer) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s ResourcesOffer) Less(i, j int) bool { + return s[i].Resources.ID < s[j].Resources.ID +} + +func (s ResourcesOffer) Dup() 
ResourcesOffer { + res := make(ResourcesOffer, 0, len(s)) + + for _, ru := range s { + res = append(res, ru.Dup()) + } + + return res +} + +func ResourceOfferFromRU(ru dtypes.ResourceUnits) ResourcesOffer { + res := make(ResourcesOffer, 0, len(ru)) + + for _, r := range ru { + res = append(res, ResourceOffer{ + Resources: r.Resources, + Count: r.Count, + }) + } + + return res +} diff --git a/go/node/market/v1beta5/bid.pb.go b/go/node/market/v1beta5/bid.pb.go new file mode 100644 index 00000000..d59485d9 --- /dev/null +++ b/go/node/market/v1beta5/bid.pb.go @@ -0,0 +1,2338 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/bid.proto + +package v1beta5 + +import ( + fmt "fmt" + v1 "github.com/akash-network/akash-api/go/node/types/resources/v1" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of bid +type Bid_State int32 + +const ( + // Prefix should start with 0 in enum. 
So declaring dummy state + BidStateInvalid Bid_State = 0 + // BidOpen denotes state for bid open + BidOpen Bid_State = 1 + // BidMatched denotes state for bid open + BidActive Bid_State = 2 + // BidLost denotes state for bid lost + BidLost Bid_State = 3 + // BidClosed denotes state for bid closed + BidClosed Bid_State = 4 +) + +var Bid_State_name = map[int32]string{ + 0: "invalid", + 1: "open", + 2: "active", + 3: "lost", + 4: "closed", +} + +var Bid_State_value = map[string]int32{ + "invalid": 0, + "open": 1, + "active": 2, + "lost": 3, + "closed": 4, +} + +func (x Bid_State) String() string { + return proto.EnumName(Bid_State_name, int32(x)) +} + +func (Bid_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{6, 0} +} + +// ResourceOffer describes resources that provider is offering +// for deployment +type ResourceOffer struct { + Resources v1.Resources `protobuf:"bytes,1,opt,name=resources,proto3" json:"resources" yaml:"resources"` + Count uint32 `protobuf:"varint,2,opt,name=count,proto3" json:"count" yaml:"count"` +} + +func (m *ResourceOffer) Reset() { *m = ResourceOffer{} } +func (m *ResourceOffer) String() string { return proto.CompactTextString(m) } +func (*ResourceOffer) ProtoMessage() {} +func (*ResourceOffer) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{0} +} +func (m *ResourceOffer) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceOffer) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceOffer.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceOffer) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceOffer.Merge(m, src) +} +func (m *ResourceOffer) XXX_Size() int { + return m.Size() +} +func (m *ResourceOffer) XXX_DiscardUnknown() { + 
xxx_messageInfo_ResourceOffer.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceOffer proto.InternalMessageInfo + +func (m *ResourceOffer) GetResources() v1.Resources { + if m != nil { + return m.Resources + } + return v1.Resources{} +} + +func (m *ResourceOffer) GetCount() uint32 { + if m != nil { + return m.Count + } + return 0 +} + +// MsgCreateBid defines an SDK message for creating Bid +type MsgCreateBid struct { + Order OrderID `protobuf:"bytes,1,opt,name=order,proto3" json:"order" yaml:"order"` + Provider string `protobuf:"bytes,2,opt,name=provider,proto3" json:"provider" yaml:"provider"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` + Deposit types.Coin `protobuf:"bytes,4,opt,name=deposit,proto3" json:"deposit" yaml:"deposit"` + ResourcesOffer ResourcesOffer `protobuf:"bytes,5,rep,name=resources_offer,json=resourcesOffer,proto3,castrepeated=ResourcesOffer" json:"resources_offer" yaml:"resources_offer"` +} + +func (m *MsgCreateBid) Reset() { *m = MsgCreateBid{} } +func (m *MsgCreateBid) String() string { return proto.CompactTextString(m) } +func (*MsgCreateBid) ProtoMessage() {} +func (*MsgCreateBid) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{1} +} +func (m *MsgCreateBid) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateBid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateBid) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateBid.Merge(m, src) +} +func (m *MsgCreateBid) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateBid) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateBid.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateBid proto.InternalMessageInfo + +func (m *MsgCreateBid) 
GetOrder() OrderID { + if m != nil { + return m.Order + } + return OrderID{} +} + +func (m *MsgCreateBid) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *MsgCreateBid) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +func (m *MsgCreateBid) GetDeposit() types.Coin { + if m != nil { + return m.Deposit + } + return types.Coin{} +} + +func (m *MsgCreateBid) GetResourcesOffer() ResourcesOffer { + if m != nil { + return m.ResourcesOffer + } + return nil +} + +// MsgCreateBidResponse defines the Msg/CreateBid response type. +type MsgCreateBidResponse struct { +} + +func (m *MsgCreateBidResponse) Reset() { *m = MsgCreateBidResponse{} } +func (m *MsgCreateBidResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateBidResponse) ProtoMessage() {} +func (*MsgCreateBidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{2} +} +func (m *MsgCreateBidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateBidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateBidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateBidResponse.Merge(m, src) +} +func (m *MsgCreateBidResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateBidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateBidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateBidResponse proto.InternalMessageInfo + +// MsgCloseBid defines an SDK message for closing bid +type MsgCloseBid struct { + BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseBid) Reset() { *m = MsgCloseBid{} } +func (m 
*MsgCloseBid) String() string { return proto.CompactTextString(m) } +func (*MsgCloseBid) ProtoMessage() {} +func (*MsgCloseBid) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{3} +} +func (m *MsgCloseBid) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseBid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseBid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseBid) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseBid.Merge(m, src) +} +func (m *MsgCloseBid) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseBid) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseBid.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseBid proto.InternalMessageInfo + +func (m *MsgCloseBid) GetBidID() BidID { + if m != nil { + return m.BidID + } + return BidID{} +} + +// MsgCloseBidResponse defines the Msg/CloseBid response type. 
+type MsgCloseBidResponse struct { +} + +func (m *MsgCloseBidResponse) Reset() { *m = MsgCloseBidResponse{} } +func (m *MsgCloseBidResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseBidResponse) ProtoMessage() {} +func (*MsgCloseBidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{4} +} +func (m *MsgCloseBidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseBidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseBidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseBidResponse.Merge(m, src) +} +func (m *MsgCloseBidResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseBidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseBidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseBidResponse proto.InternalMessageInfo + +// BidID stores owner and all other seq numbers +// A successful bid becomes a Lease(ID). 
+type BidID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` +} + +func (m *BidID) Reset() { *m = BidID{} } +func (*BidID) ProtoMessage() {} +func (*BidID) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{5} +} +func (m *BidID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BidID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BidID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BidID) XXX_Merge(src proto.Message) { + xxx_messageInfo_BidID.Merge(m, src) +} +func (m *BidID) XXX_Size() int { + return m.Size() +} +func (m *BidID) XXX_DiscardUnknown() { + xxx_messageInfo_BidID.DiscardUnknown(m) +} + +var xxx_messageInfo_BidID proto.InternalMessageInfo + +func (m *BidID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *BidID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *BidID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *BidID) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *BidID) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +// Bid stores BidID, state of bid and price +type Bid struct { + BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` + State Bid_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta5.Bid_State" json:"state" 
yaml:"state"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ResourcesOffer ResourcesOffer `protobuf:"bytes,5,rep,name=resources_offer,json=resourcesOffer,proto3,castrepeated=ResourcesOffer" json:"resources_offer" yaml:"resources_offer"` +} + +func (m *Bid) Reset() { *m = Bid{} } +func (*Bid) ProtoMessage() {} +func (*Bid) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{6} +} +func (m *Bid) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Bid) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Bid.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Bid) XXX_Merge(src proto.Message) { + xxx_messageInfo_Bid.Merge(m, src) +} +func (m *Bid) XXX_Size() int { + return m.Size() +} +func (m *Bid) XXX_DiscardUnknown() { + xxx_messageInfo_Bid.DiscardUnknown(m) +} + +var xxx_messageInfo_Bid proto.InternalMessageInfo + +func (m *Bid) GetBidID() BidID { + if m != nil { + return m.BidID + } + return BidID{} +} + +func (m *Bid) GetState() Bid_State { + if m != nil { + return m.State + } + return BidStateInvalid +} + +func (m *Bid) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +func (m *Bid) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Bid) GetResourcesOffer() ResourcesOffer { + if m != nil { + return m.ResourcesOffer + } + return nil +} + +// BidFilters defines flags for bid list filter +type BidFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 
`protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` + State string `protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *BidFilters) Reset() { *m = BidFilters{} } +func (m *BidFilters) String() string { return proto.CompactTextString(m) } +func (*BidFilters) ProtoMessage() {} +func (*BidFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_9a051a9ee62f13b0, []int{7} +} +func (m *BidFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *BidFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_BidFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *BidFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_BidFilters.Merge(m, src) +} +func (m *BidFilters) XXX_Size() int { + return m.Size() +} +func (m *BidFilters) XXX_DiscardUnknown() { + xxx_messageInfo_BidFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_BidFilters proto.InternalMessageInfo + +func (m *BidFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *BidFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *BidFilters) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *BidFilters) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *BidFilters) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *BidFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func init() { + proto.RegisterEnum("akash.market.v1beta5.Bid_State", Bid_State_name, Bid_State_value) + 
proto.RegisterType((*ResourceOffer)(nil), "akash.market.v1beta5.ResourceOffer") + proto.RegisterType((*MsgCreateBid)(nil), "akash.market.v1beta5.MsgCreateBid") + proto.RegisterType((*MsgCreateBidResponse)(nil), "akash.market.v1beta5.MsgCreateBidResponse") + proto.RegisterType((*MsgCloseBid)(nil), "akash.market.v1beta5.MsgCloseBid") + proto.RegisterType((*MsgCloseBidResponse)(nil), "akash.market.v1beta5.MsgCloseBidResponse") + proto.RegisterType((*BidID)(nil), "akash.market.v1beta5.BidID") + proto.RegisterType((*Bid)(nil), "akash.market.v1beta5.Bid") + proto.RegisterType((*BidFilters)(nil), "akash.market.v1beta5.BidFilters") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/bid.proto", fileDescriptor_9a051a9ee62f13b0) } + +var fileDescriptor_9a051a9ee62f13b0 = []byte{ + // 897 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x6f, 0xe3, 0x44, + 0x14, 0x8f, 0x13, 0xbb, 0xdd, 0x4c, 0xb6, 0x6d, 0xe4, 0xed, 0xae, 0xd2, 0x40, 0x3d, 0x66, 0x10, + 0xd0, 0x0b, 0xb6, 0xda, 0x15, 0x07, 0xca, 0x69, 0xbd, 0x15, 0xa8, 0x12, 0xa8, 0xe0, 0x02, 0x07, + 0x38, 0x54, 0x8e, 0x67, 0xd6, 0x3b, 0x6a, 0xea, 0x71, 0x3d, 0x6e, 0x56, 0x7c, 0x03, 0xd4, 0x13, + 0x12, 0x17, 0x2e, 0x45, 0x2b, 0xed, 0x8d, 0x0b, 0x5f, 0x63, 0x8f, 0x3d, 0x72, 0x32, 0x28, 0x15, + 0x12, 0xca, 0x31, 0x5f, 0x00, 0x34, 0x7f, 0x62, 0x27, 0x55, 0x40, 0xe2, 0xb0, 0x07, 0x24, 0x4e, + 0xee, 0xfb, 0xbd, 0xf7, 0x7b, 0xef, 0xf9, 0xbd, 0xdf, 0x6b, 0x0c, 0x9c, 0xe8, 0x34, 0xe2, 0x4f, + 0xfd, 0xb3, 0x28, 0x3f, 0x25, 0x85, 0x3f, 0xda, 0x1d, 0x90, 0x22, 0x7a, 0xcf, 0x1f, 0x50, 0xec, + 0x65, 0x39, 0x2b, 0x98, 0xbd, 0x29, 0xfd, 0x9e, 0xf2, 0x7b, 0xda, 0xdf, 0xdf, 0x4c, 0x58, 0xc2, + 0x64, 0x80, 0x2f, 0xfe, 0x52, 0xb1, 0x7d, 0x27, 0x66, 0xfc, 0x8c, 0x71, 0x7f, 0x10, 0x71, 0xa2, + 0x53, 0xed, 0xfa, 0x31, 0xa3, 0xa9, 0xf6, 0xbf, 0xa3, 0x6a, 0x49, 0x77, 0x4e, 0x38, 0xbb, 0xc8, + 0x63, 0xc2, 0xfd, 0xd1, 0x6e, 0x6d, 0xe8, 0x40, 0x77, 0x69, 0x53, 0x2c, 0xc7, 
0x24, 0x57, 0x11, + 0xe8, 0x67, 0x03, 0xac, 0x85, 0x9a, 0x75, 0xf4, 0xe4, 0x09, 0xc9, 0xed, 0x04, 0xb4, 0xab, 0x34, + 0x3d, 0xc3, 0x35, 0x76, 0x3a, 0x7b, 0xc8, 0x53, 0xcd, 0x8b, 0x82, 0x5e, 0x5d, 0x63, 0xb4, 0xeb, + 0xcd, 0xa8, 0x3c, 0x78, 0xeb, 0x65, 0x09, 0x1b, 0x93, 0x12, 0xd6, 0xe4, 0x69, 0x09, 0xbb, 0xdf, + 0x44, 0x67, 0xc3, 0x7d, 0x54, 0x41, 0x28, 0xac, 0xdd, 0xb6, 0x0f, 0xac, 0x98, 0x5d, 0xa4, 0x45, + 0xaf, 0xe9, 0x1a, 0x3b, 0x6b, 0xc1, 0xd6, 0xa4, 0x84, 0x0a, 0x98, 0x96, 0xf0, 0xae, 0x22, 0x4a, + 0x13, 0x85, 0x0a, 0xde, 0x37, 0xff, 0x78, 0x0e, 0x0d, 0xf4, 0x67, 0x0b, 0xdc, 0xfd, 0x84, 0x27, + 0x8f, 0x73, 0x12, 0x15, 0x24, 0xa0, 0xd8, 0xfe, 0x1a, 0x58, 0xf2, 0x8d, 0x74, 0xb3, 0xdb, 0xde, + 0xb2, 0x49, 0x7b, 0x47, 0x22, 0xe4, 0xf0, 0x20, 0x78, 0x5b, 0xf4, 0x39, 0x2e, 0xa1, 0x25, 0x01, + 0x51, 0x53, 0x92, 0xeb, 0x9a, 0xd2, 0x44, 0xa1, 0x82, 0xed, 0x0f, 0xc0, 0x9d, 0x2c, 0x67, 0x23, + 0x2a, 0xf2, 0x8b, 0x3e, 0xdb, 0x01, 0x9c, 0x94, 0xb0, 0xc2, 0xa6, 0x25, 0xdc, 0x50, 0xb4, 0x19, + 0x82, 0xc2, 0xca, 0x69, 0x7f, 0x06, 0xac, 0x2c, 0xa7, 0x31, 0xe9, 0xb5, 0x64, 0x67, 0xaf, 0x7b, + 0x6a, 0xaf, 0x6a, 0x8e, 0x7a, 0xaf, 0xde, 0x01, 0x89, 0x1f, 0x33, 0x9a, 0x06, 0xdb, 0x7a, 0x80, + 0x8a, 0x52, 0xf7, 0x23, 0x4d, 0x14, 0x2a, 0xd8, 0xfe, 0x12, 0xac, 0x62, 0x92, 0x31, 0x4e, 0x8b, + 0x9e, 0x29, 0x93, 0x6e, 0x2d, 0x4d, 0x2a, 0x33, 0xbe, 0xa1, 0x33, 0xce, 0x18, 0xd3, 0x12, 0xae, + 0xab, 0x9c, 0x1a, 0x40, 0xe1, 0xcc, 0x65, 0xbf, 0x30, 0xc0, 0x46, 0xb5, 0x9a, 0x13, 0x26, 0x94, + 0xd0, 0xb3, 0xdc, 0xd6, 0x4e, 0x67, 0xef, 0xcd, 0xe5, 0xf3, 0x5c, 0x10, 0x4d, 0xf0, 0x85, 0x9e, + 0xea, 0x7a, 0x25, 0x08, 0x89, 0x4f, 0x4a, 0x78, 0x3b, 0xeb, 0xb4, 0x84, 0x0f, 0x6e, 0xa9, 0x42, + 0x39, 0xd0, 0x4f, 0xbf, 0xde, 0xa6, 0x87, 0xeb, 0xf9, 0x82, 0x2d, 0x15, 0xd0, 0x40, 0x0f, 0xc0, + 0xe6, 0xbc, 0x00, 0x42, 0xc2, 0x33, 0x96, 0x72, 0x82, 0x28, 0xe8, 0x08, 0x7c, 0xc8, 0xb8, 0xd4, + 0xc5, 0xe7, 0x60, 0x65, 0x40, 0xf1, 0x09, 0xc5, 0x5a, 0x18, 0xaf, 0x2d, 0x7f, 0x91, 0x80, 0xe2, + 0xc3, 0x83, 0xc0, 
0x9d, 0xc9, 0x42, 0x9a, 0x93, 0x12, 0x36, 0x29, 0x9e, 0x96, 0xb0, 0xad, 0x5a, + 0xa5, 0x18, 0x85, 0xd6, 0x80, 0xe2, 0x43, 0xac, 0x5b, 0xb8, 0x0f, 0xee, 0xcd, 0x95, 0xaa, 0x3a, + 0xf8, 0xb1, 0x09, 0x54, 0x02, 0x21, 0x6e, 0xf6, 0x2c, 0xd5, 0xa2, 0x6c, 0x2b, 0x71, 0x4b, 0x60, + 0x4e, 0x68, 0xc2, 0x14, 0x42, 0x13, 0x4f, 0xfb, 0x21, 0x30, 0x31, 0x27, 0xe7, 0x52, 0x64, 0x66, + 0x00, 0xc7, 0x25, 0x34, 0x0f, 0x8e, 0xc9, 0xf9, 0xa4, 0x84, 0x12, 0x9f, 0x96, 0xb0, 0xa3, 0x77, + 0xc7, 0xc9, 0x39, 0x0a, 0x25, 0x28, 0x48, 0x89, 0x20, 0xb5, 0xe4, 0x05, 0x49, 0xd2, 0x47, 0x9a, + 0x94, 0x2c, 0x90, 0x12, 0x45, 0x4a, 0x34, 0x89, 0x09, 0x92, 0x59, 0x93, 0x8e, 0x34, 0x89, 0x2d, + 0x90, 0x98, 0x22, 0x89, 0xc7, 0xc2, 0x1d, 0x58, 0xff, 0xf2, 0x0e, 0xf6, 0xef, 0xfc, 0xf0, 0x1c, + 0x36, 0xe4, 0xdc, 0x7e, 0x37, 0x41, 0xeb, 0x95, 0xed, 0xc6, 0xfe, 0x14, 0x58, 0xbc, 0x88, 0x0a, + 0x22, 0x87, 0xb8, 0xbe, 0x07, 0xff, 0x36, 0xa9, 0x77, 0x2c, 0xc2, 0xd4, 0x56, 0x24, 0xa3, 0xde, + 0x8a, 0x34, 0x51, 0xa8, 0xe0, 0x57, 0x71, 0xc1, 0xdb, 0x00, 0xc4, 0x52, 0xba, 0xf8, 0x24, 0x52, + 0x47, 0xdc, 0x0a, 0xdb, 0x1a, 0x79, 0xf4, 0x1f, 0x39, 0x44, 0xf4, 0xbd, 0x01, 0x2c, 0x39, 0x43, + 0xdb, 0x05, 0xab, 0x34, 0x1d, 0x45, 0x43, 0x8a, 0xbb, 0x8d, 0xfe, 0xbd, 0xcb, 0x2b, 0x77, 0x23, + 0xa0, 0x58, 0xba, 0x0e, 0x15, 0x6c, 0xdf, 0x07, 0x26, 0xcb, 0x48, 0xda, 0x35, 0xfa, 0x9d, 0xcb, + 0x2b, 0x77, 0x35, 0xa0, 0xf8, 0x28, 0x23, 0xa9, 0xbd, 0x05, 0x56, 0xa2, 0xb8, 0xa0, 0x23, 0xd2, + 0x6d, 0xf6, 0xd7, 0x2e, 0xaf, 0xdc, 0x76, 0x40, 0xf1, 0x23, 0x09, 0x08, 0xc6, 0x90, 0xf1, 0xa2, + 0xdb, 0xaa, 0x18, 0x1f, 0x33, 0x5e, 0x08, 0x46, 0x2c, 0x2e, 0x0e, 0x77, 0xcd, 0x8a, 0x21, 0x4f, + 0x10, 0xf7, 0xcd, 0x6f, 0x5f, 0x38, 0x8d, 0x39, 0x9d, 0x5d, 0x37, 0x01, 0x08, 0x28, 0xfe, 0x90, + 0x0e, 0x0b, 0x92, 0xf3, 0xff, 0xaf, 0x71, 0xfe, 0x57, 0xc9, 0x9f, 0x5d, 0xc9, 0x4a, 0x3d, 0x8c, + 0x7f, 0x3a, 0x02, 0xf5, 0x2f, 0x2f, 0x38, 0x7e, 0x39, 0x76, 0x8c, 0xeb, 0xb1, 0x63, 0xfc, 0x36, + 0x76, 0x8c, 0xef, 0x6e, 0x9c, 0xc6, 0xf5, 0x8d, 0xd3, 
0xf8, 0xe5, 0xc6, 0x69, 0x7c, 0xf5, 0x7e, + 0x42, 0x8b, 0xa7, 0x17, 0x03, 0x2f, 0x66, 0x67, 0xbe, 0x94, 0xe8, 0xbb, 0x29, 0x29, 0x9e, 0xb1, + 0xfc, 0x54, 0x5b, 0x51, 0x46, 0xfd, 0x84, 0xf9, 0x29, 0xc3, 0xe4, 0xd6, 0xa7, 0xc8, 0x60, 0x45, + 0x7e, 0x85, 0x3c, 0xfc, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x48, 0xf9, 0x25, 0x2a, 0x3e, 0x09, 0x00, + 0x00, +} + +func (this *ResourceOffer) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceOffer) + if !ok { + that2, ok := that.(ResourceOffer) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Resources.Equal(&that1.Resources) { + return false + } + if this.Count != that1.Count { + return false + } + return true +} +func (m *ResourceOffer) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceOffer) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceOffer) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Count != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.Count)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.Resources.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCreateBid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateBid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateBid) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourcesOffer) > 0 { + for iNdEx := len(m.ResourcesOffer) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourcesOffer[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + { + size, err := m.Deposit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCreateBidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateBidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCloseBid) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseBid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseBid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseBidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseBidResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *BidID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BidID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BidID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Bid) Marshal() 
(dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Bid) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Bid) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.ResourcesOffer) > 0 { + for iNdEx := len(m.ResourcesOffer) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ResourcesOffer[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } + if m.CreatedAt != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintBid(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *BidFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BidFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *BidFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintBid(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], 
m.Provider) + i = encodeVarintBid(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintBid(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintBid(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintBid(dAtA []byte, offset int, v uint64) int { + offset -= sovBid(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceOffer) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Resources.Size() + n += 1 + l + sovBid(uint64(l)) + if m.Count != 0 { + n += 1 + sovBid(uint64(m.Count)) + } + return n +} + +func (m *MsgCreateBid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Order.Size() + n += 1 + l + sovBid(uint64(l)) + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovBid(uint64(l)) + } + l = m.Price.Size() + n += 1 + l + sovBid(uint64(l)) + l = m.Deposit.Size() + n += 1 + l + sovBid(uint64(l)) + if len(m.ResourcesOffer) > 0 { + for _, e := range m.ResourcesOffer { + l = e.Size() + n += 1 + l + sovBid(uint64(l)) + } + } + return n +} + +func (m *MsgCreateBidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgCloseBid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BidID.Size() + n += 1 + l + sovBid(uint64(l)) + return n +} + +func (m *MsgCloseBidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *BidID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + 
l + sovBid(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovBid(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovBid(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovBid(uint64(m.OSeq)) + } + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovBid(uint64(l)) + } + return n +} + +func (m *Bid) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BidID.Size() + n += 1 + l + sovBid(uint64(l)) + if m.State != 0 { + n += 1 + sovBid(uint64(m.State)) + } + l = m.Price.Size() + n += 1 + l + sovBid(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovBid(uint64(m.CreatedAt)) + } + if len(m.ResourcesOffer) > 0 { + for _, e := range m.ResourcesOffer { + l = e.Size() + n += 1 + l + sovBid(uint64(l)) + } + } + return n +} + +func (m *BidFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovBid(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovBid(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovBid(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovBid(uint64(m.OSeq)) + } + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovBid(uint64(l)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovBid(uint64(l)) + } + return n +} + +func sovBid(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozBid(x uint64) (n int) { + return sovBid(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceOffer) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceOffer: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: 
ResourceOffer: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Resources", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Resources.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Count", wireType) + } + m.Count = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Count |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateBid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateBid: wiretype end group for non-group") + } + if fieldNum 
<= 0 { + return fmt.Errorf("proto: MsgCreateBid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF 
+ } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Deposit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Deposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourcesOffer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourcesOffer = append(m.ResourcesOffer, ResourceOffer{}) + if err := m.ResourcesOffer[len(m.ResourcesOffer)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateBidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateBidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseBid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseBid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseBid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx 
+ msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseBidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseBidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BidID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := 
int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BidID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BidID: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Bid) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Bid: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Bid: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + 
break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Bid_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResourcesOffer", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + 
iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResourcesOffer = append(m.ResourcesOffer, ResourceOffer{}) + if err := m.ResourcesOffer[len(m.ResourcesOffer)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BidFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BidFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BidFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return 
ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", 
wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowBid + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthBid + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthBid + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipBid(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthBid + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipBid(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowBid + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthBid + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupBid + } + 
depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthBid + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthBid = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowBid = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupBid = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/bid_test.go b/go/node/market/v1beta5/bid_test.go new file mode 100644 index 00000000..95006dc2 --- /dev/null +++ b/go/node/market/v1beta5/bid_test.go @@ -0,0 +1,48 @@ +package v1beta5_test + +import ( + "testing" + + "github.com/stretchr/testify/require" + + testutil "github.com/akash-network/akash-api/go/node/client/testutil/v1beta3" + + "github.com/akash-network/akash-api/go/node/market/v1beta5" +) + +func TestBid_GSpecMatch_Valid(t *testing.T) { + gspec := testutil.GroupSpec(t) + + rOffer := v1beta5.ResourceOfferFromRU(gspec.Resources) + + require.True(t, rOffer.MatchGSpec(gspec)) +} + +func TestBid_GSpecMatch_Valid2(t *testing.T) { + gspec := testutil.GroupSpec(t) + + if len(gspec.Resources) == 1 { + rl := testutil.ResourcesList(t, 2) + rl[0].Count = 4 + gspec.Resources = append(gspec.Resources, rl...) + } + + rOffer := v1beta5.ResourceOfferFromRU(gspec.Resources) + + require.True(t, rOffer.MatchGSpec(gspec)) +} + +func TestBid_GSpecMatch_InvalidCount(t *testing.T) { + gspec := testutil.GroupSpec(t) + + if len(gspec.Resources) == 1 { + rl := testutil.ResourcesList(t, 2) + gspec.Resources = append(gspec.Resources, rl...) 
+ } + + rOffer := v1beta5.ResourceOfferFromRU(gspec.Resources) + + gspec.Resources[0].Count = 2 + + require.False(t, rOffer.MatchGSpec(gspec)) +} diff --git a/go/node/market/v1beta5/codec.go b/go/node/market/v1beta5/codec.go new file mode 100644 index 00000000..47b0235e --- /dev/null +++ b/go/node/market/v1beta5/codec.go @@ -0,0 +1,50 @@ +package v1beta5 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/market module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/market and + // defined at the application level. + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +func init() { + RegisterLegacyAminoCodec(amino) + cryptocodec.RegisterCrypto(amino) + amino.Seal() +} + +// RegisterLegacyAminoCodec registers the necessary x/market interfaces and concrete types +// on the provided Amino codec. These types are used for Amino JSON serialization. 
+func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgCreateBid{}, ModuleName+"/"+MsgTypeCreateBid, nil) + cdc.RegisterConcrete(&MsgCloseBid{}, ModuleName+"/"+MsgTypeCloseBid, nil) + cdc.RegisterConcrete(&MsgCreateLease{}, ModuleName+"/"+MsgTypeCreateLease, nil) + cdc.RegisterConcrete(&MsgWithdrawLease{}, ModuleName+"/"+MsgTypeWithdrawLease, nil) + cdc.RegisterConcrete(&MsgCloseLease{}, ModuleName+"/"+MsgTypeCloseLease, nil) +} + +// RegisterInterfaces registers the x/market interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreateBid{}, + &MsgCloseBid{}, + &MsgCreateLease{}, + &MsgWithdrawLease{}, + &MsgCloseLease{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/market/v1beta5/errors.go b/go/node/market/v1beta5/errors.go new file mode 100644 index 00000000..ba941f1a --- /dev/null +++ b/go/node/market/v1beta5/errors.go @@ -0,0 +1,107 @@ +package v1beta5 + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + errCodeEmptyProvider uint32 = iota + 1 + errCodeSameAccount + errCodeInternal + errCodeOverOrder + errCodeAttributeMismatch + errCodeUnknownBid + errCodeUnknownLease + errCodeUnknownLeaseForOrder + errCodeUnknownOrderForBid + errCodeLeaseNotActive + errCodeBidNotActive + errCodeBidNotOpen + errCodeOrderNotOpen + errCodeNoLeaseForOrder + errCodeOrderNotFound + errCodeGroupNotFound + errCodeGroupNotOpen + errCodeBidNotFound + errCodeBidZeroPrice + errCodeLeaseNotFound + errCodeBidExists + errCodeInvalidPrice + errCodeOrderActive + errCodeOrderClosed + errCodeOrderExists + errCodeOrderDurationExceeded + errCodeOrderTooEarly + errInvalidDeposit + errInvalidParam + errUnknownProvider + errInvalidBid + errCodeCapabilitiesMismatch +) + +var ( + // ErrEmptyProvider is the error when provider is empty + ErrEmptyProvider = 
sdkerrors.Register(ModuleName, errCodeEmptyProvider, "empty provider") + // ErrSameAccount is the error when owner and provider are the same account + ErrSameAccount = sdkerrors.Register(ModuleName, errCodeSameAccount, "owner and provider are the same account") + // ErrInternal is the error for internal error + ErrInternal = sdkerrors.Register(ModuleName, errCodeInternal, "internal error") + // ErrBidOverOrder is the error when bid price is above max order price + ErrBidOverOrder = sdkerrors.Register(ModuleName, errCodeOverOrder, "bid price above max order price") + // ErrAttributeMismatch is the error for attribute mismatch + ErrAttributeMismatch = sdkerrors.Register(ModuleName, errCodeAttributeMismatch, "attribute mismatch") + // ErrCapabilitiesMismatch is the error for capabilities mismatch + ErrCapabilitiesMismatch = sdkerrors.Register(ModuleName, errCodeCapabilitiesMismatch, "capabilities mismatch") + // ErrUnknownBid is the error for unknown bid + ErrUnknownBid = sdkerrors.Register(ModuleName, errCodeUnknownBid, "unknown bid") + // ErrUnknownLease is the error for unknown bid + ErrUnknownLease = sdkerrors.Register(ModuleName, errCodeUnknownLease, "unknown lease") + // ErrUnknownLeaseForBid is the error when lease is unknown for bid + ErrUnknownLeaseForBid = sdkerrors.Register(ModuleName, errCodeUnknownLeaseForOrder, "unknown lease for bid") + // ErrUnknownOrderForBid is the error when order is unknown for bid + ErrUnknownOrderForBid = sdkerrors.Register(ModuleName, errCodeUnknownOrderForBid, "unknown order for bid") + // ErrLeaseNotActive is the error when lease is not active + ErrLeaseNotActive = sdkerrors.Register(ModuleName, errCodeLeaseNotActive, "lease not active") + // ErrBidNotActive is the error when bid is not matched + ErrBidNotActive = sdkerrors.Register(ModuleName, errCodeBidNotActive, "bid not active") + // ErrBidNotOpen is the error when bid is not matched + ErrBidNotOpen = sdkerrors.Register(ModuleName, errCodeBidNotOpen, "bid not open") + // 
ErrNoLeaseForOrder is the error when there is no lease for order + ErrNoLeaseForOrder = sdkerrors.Register(ModuleName, errCodeNoLeaseForOrder, "no lease for order") + // ErrOrderNotFound order not found + ErrOrderNotFound = sdkerrors.Register(ModuleName, errCodeOrderNotFound, "invalid order: order not found") + // ErrGroupNotFound order not found + ErrGroupNotFound = sdkerrors.Register(ModuleName, errCodeGroupNotFound, "order not found") + // ErrGroupNotOpen order not found + ErrGroupNotOpen = sdkerrors.Register(ModuleName, errCodeGroupNotOpen, "order not open") + // ErrOrderNotOpen order not found + ErrOrderNotOpen = sdkerrors.Register(ModuleName, errCodeOrderNotOpen, "bid: order not open") + // ErrBidNotFound bid not found + ErrBidNotFound = sdkerrors.Register(ModuleName, errCodeBidNotFound, "invalid bid: bid not found") + // ErrBidZeroPrice zero price + ErrBidZeroPrice = sdkerrors.Register(ModuleName, errCodeBidZeroPrice, "invalid bid: zero price") + // ErrLeaseNotFound lease not found + ErrLeaseNotFound = sdkerrors.Register(ModuleName, errCodeLeaseNotFound, "invalid lease: lease not found") + // ErrBidExists bid exists + ErrBidExists = sdkerrors.Register(ModuleName, errCodeBidExists, "invalid bid: bid exists from provider") + // ErrBidInvalidPrice bid invalid price + ErrBidInvalidPrice = sdkerrors.Register(ModuleName, errCodeInvalidPrice, "bid price is invalid") + // ErrOrderActive order active + ErrOrderActive = sdkerrors.New(ModuleName, errCodeOrderActive, "order active") + // ErrOrderClosed order closed + ErrOrderClosed = sdkerrors.New(ModuleName, errCodeOrderClosed, "order closed") + // ErrOrderExists indicates a new order was proposed overwrite the existing store key + ErrOrderExists = sdkerrors.New(ModuleName, errCodeOrderExists, "order already exists in store") + // ErrOrderTooEarly to match bid + ErrOrderTooEarly = sdkerrors.New(ModuleName, errCodeOrderTooEarly, "order: chain height to low for bidding") + // ErrOrderDurationExceeded order should be 
closed + ErrOrderDurationExceeded = sdkerrors.New(ModuleName, errCodeOrderDurationExceeded, "order duration has exceeded the bidding duration") + // ErrInvalidDeposit indicates an invalid deposit + ErrInvalidDeposit = sdkerrors.Register(ModuleName, errInvalidDeposit, "Deposit invalid") + // ErrInvalidParam indicates an invalid chain parameter + ErrInvalidParam = sdkerrors.Register(ModuleName, errInvalidParam, "parameter invalid") + // ErrUnknownProvider indicates an invalid chain parameter + ErrUnknownProvider = sdkerrors.Register(ModuleName, errUnknownProvider, "unknown provider") + // ErrInvalidBid indicates an invalid chain parameter + ErrInvalidBid = sdkerrors.Register(ModuleName, errInvalidBid, "unknown provider") +) diff --git a/go/node/market/v1beta5/escrow.go b/go/node/market/v1beta5/escrow.go new file mode 100644 index 00000000..328b81fb --- /dev/null +++ b/go/node/market/v1beta5/escrow.go @@ -0,0 +1,60 @@ +package v1beta5 + +import ( + "fmt" + "strconv" + "strings" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + etypes "github.com/akash-network/akash-api/go/node/escrow/v1beta3" +) + +const ( + bidEscrowScope = "bid" +) + +func EscrowAccountForBid(id BidID) etypes.AccountID { + return etypes.AccountID{ + Scope: bidEscrowScope, + XID: id.String(), + } +} + +func EscrowPaymentForLease(id LeaseID) string { + return fmt.Sprintf("%v/%v/%s", id.GSeq, id.OSeq, id.Provider) +} + +func LeaseIDFromEscrowAccount(id etypes.AccountID, pid string) (LeaseID, bool) { + did, ok := dtypes.DeploymentIDFromEscrowAccount(id) + if !ok { + return LeaseID{}, false + } + + parts := strings.Split(pid, "/") + if len(parts) != 3 { + return LeaseID{}, false + } + + gseq, err := strconv.ParseUint(parts[0], 10, 32) + if err != nil { + return LeaseID{}, false + } + + oseq, err := strconv.ParseUint(parts[1], 10, 32) + if err != nil { + return LeaseID{}, false + } + + owner, err := sdk.AccAddressFromBech32(parts[2]) 
+ if err != nil { + return LeaseID{}, false + } + + return MakeLeaseID( + MakeBidID( + MakeOrderID( + dtypes.MakeGroupID( + did, uint32(gseq)), uint32(oseq)), owner)), true +} diff --git a/go/node/market/v1beta5/event.go b/go/node/market/v1beta5/event.go new file mode 100644 index 00000000..bb9787b7 --- /dev/null +++ b/go/node/market/v1beta5/event.go @@ -0,0 +1,359 @@ +package v1beta5 + +import ( + "errors" + "strconv" + + sdk "github.com/cosmos/cosmos-sdk/types" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + "github.com/akash-network/akash-api/go/sdkutil" +) + +const ( + evActionOrderCreated = "order-created" + evActionOrderClosed = "order-closed" + evActionBidCreated = "bid-created" + evActionBidClosed = "bid-closed" + evActionLeaseCreated = "lease-created" + evActionLeaseClosed = "lease-closed" + + evOSeqKey = "oseq" + evProviderKey = "provider" + evPriceDenomKey = "price-denom" + evPriceAmountKey = "price-amount" +) + +var ( + ErrParsingPrice = errors.New("error parsing price") +) + +// EventOrderCreated struct +type EventOrderCreated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID OrderID `json:"id"` +} + +func NewEventOrderCreated(id OrderID) EventOrderCreated { + return EventOrderCreated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionOrderCreated, + }, + ID: id, + } +} + +// ToSDKEvent method creates new sdk event for EventOrderCreated struct +func (e EventOrderCreated) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderCreated), + }, orderIDEVAttributes(e.ID)...)..., + ) +} + +// EventOrderClosed struct +type EventOrderClosed struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID OrderID `json:"id"` +} + +func NewEventOrderClosed(id OrderID) EventOrderClosed { + return EventOrderClosed{ + Context: 
sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionOrderClosed, + }, + ID: id, + } +} + +// ToSDKEvent method creates new sdk event for EventOrderClosed struct +func (e EventOrderClosed) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionOrderClosed), + }, orderIDEVAttributes(e.ID)...)..., + ) +} + +// EventBidCreated struct +type EventBidCreated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID BidID `json:"id"` + Price sdk.DecCoin `json:"price"` +} + +func NewEventBidCreated(id BidID, price sdk.DecCoin) EventBidCreated { + return EventBidCreated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionBidCreated, + }, + ID: id, + Price: price, + } +} + +// ToSDKEvent method creates new sdk event for EventBidCreated struct +func (e EventBidCreated) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append( + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidCreated), + }, bidIDEVAttributes(e.ID)...), + priceEVAttributes(e.Price)...)..., + ) +} + +// EventBidClosed struct +type EventBidClosed struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID BidID `json:"id"` + Price sdk.DecCoin `json:"price"` +} + +func NewEventBidClosed(id BidID, price sdk.DecCoin) EventBidClosed { + return EventBidClosed{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionBidClosed, + }, + ID: id, + Price: price, + } +} + +// ToSDKEvent method creates new sdk event for EventBidClosed struct +func (e EventBidClosed) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append( + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionBidClosed), + }, 
bidIDEVAttributes(e.ID)...), + priceEVAttributes(e.Price)...)..., + ) +} + +// EventLeaseCreated struct +type EventLeaseCreated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID LeaseID `json:"id"` + Price sdk.DecCoin `json:"price"` +} + +func NewEventLeaseCreated(id LeaseID, price sdk.DecCoin) EventLeaseCreated { + return EventLeaseCreated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionLeaseCreated, + }, + ID: id, + Price: price, + } +} + +// ToSDKEvent method creates new sdk event for EventLeaseCreated struct +func (e EventLeaseCreated) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append( + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseCreated), + }, leaseIDEVAttributes(e.ID)...), + priceEVAttributes(e.Price)...)...) +} + +// EventLeaseClosed struct +type EventLeaseClosed struct { + Context sdkutil.BaseModuleEvent `json:"context"` + ID LeaseID `json:"id"` + Price sdk.DecCoin `json:"price"` +} + +func NewEventLeaseClosed(id LeaseID, price sdk.DecCoin) EventLeaseClosed { + return EventLeaseClosed{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: evActionLeaseClosed, + }, + ID: id, + Price: price, + } +} + +// ToSDKEvent method creates new sdk event for EventLeaseClosed struct +func (e EventLeaseClosed) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append( + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, evActionLeaseClosed), + }, leaseIDEVAttributes(e.ID)...), + priceEVAttributes(e.Price)...)...) 
+} + +// orderIDEVAttributes returns event attributes for given orderID +func orderIDEVAttributes(id OrderID) []sdk.Attribute { + return append(dtypes.GroupIDEVAttributes(id.GroupID()), + sdk.NewAttribute(evOSeqKey, strconv.FormatUint(uint64(id.OSeq), 10))) +} + +// parseEVOrderID returns orderID for given event attributes +func parseEVOrderID(attrs []sdk.Attribute) (OrderID, error) { + gid, err := dtypes.ParseEVGroupID(attrs) + if err != nil { + return OrderID{}, err + } + oseq, err := sdkutil.GetUint64(attrs, evOSeqKey) + if err != nil { + return OrderID{}, err + } + + return OrderID{ + Owner: gid.Owner, + DSeq: gid.DSeq, + GSeq: gid.GSeq, + OSeq: uint32(oseq), + }, nil + +} + +// bidIDEVAttributes returns event attributes for given bidID +func bidIDEVAttributes(id BidID) []sdk.Attribute { + return append(orderIDEVAttributes(id.OrderID()), + sdk.NewAttribute(evProviderKey, id.Provider)) +} + +// parseEVBidID returns bidID for given event attributes +func parseEVBidID(attrs []sdk.Attribute) (BidID, error) { + oid, err := parseEVOrderID(attrs) + if err != nil { + return BidID{}, err + } + + provider, err := sdkutil.GetAccAddress(attrs, evProviderKey) + if err != nil { + return BidID{}, err + } + + return BidID{ + Owner: oid.Owner, + DSeq: oid.DSeq, + GSeq: oid.GSeq, + OSeq: oid.OSeq, + Provider: provider.String(), + }, nil +} + +// leaseIDEVAttributes returns event attributes for given LeaseID +func leaseIDEVAttributes(id LeaseID) []sdk.Attribute { + return append(orderIDEVAttributes(id.OrderID()), + sdk.NewAttribute(evProviderKey, id.Provider)) +} + +// parseEVLeaseID returns leaseID for given event attributes +func parseEVLeaseID(attrs []sdk.Attribute) (LeaseID, error) { + bid, err := parseEVBidID(attrs) + if err != nil { + return LeaseID{}, err + } + return LeaseID(bid), nil +} + +func priceEVAttributes(price sdk.DecCoin) []sdk.Attribute { + return []sdk.Attribute{ + sdk.NewAttribute(evPriceDenomKey, price.Denom), + sdk.NewAttribute(evPriceAmountKey, 
price.Amount.String()), + } +} + +func parseEVPriceAttributes(attrs []sdk.Attribute) (sdk.DecCoin, error) { + denom, err := sdkutil.GetString(attrs, evPriceDenomKey) + if err != nil { + return sdk.DecCoin{}, err + } + + amounts, err := sdkutil.GetString(attrs, evPriceAmountKey) + if err != nil { + return sdk.DecCoin{}, err + } + + amount, err := sdk.NewDecFromStr(amounts) + if err != nil { + return sdk.DecCoin{}, ErrParsingPrice + } + + return sdk.NewDecCoinFromDec(denom, amount), nil +} + +// ParseEvent parses event and returns details of event and error if occurred +func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { + if ev.Type != sdkutil.EventTypeMessage { + return nil, sdkutil.ErrUnknownType + } + if ev.Module != ModuleName { + return nil, sdkutil.ErrUnknownModule + } + switch ev.Action { + + case evActionOrderCreated: + id, err := parseEVOrderID(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventOrderCreated(id), nil + case evActionOrderClosed: + id, err := parseEVOrderID(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventOrderClosed(id), nil + + case evActionBidCreated: + id, err := parseEVBidID(ev.Attributes) + if err != nil { + return nil, err + } + price, err := parseEVPriceAttributes(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventBidCreated(id, price), nil + case evActionBidClosed: + id, err := parseEVBidID(ev.Attributes) + if err != nil { + return nil, err + } + // optional price + price, _ := parseEVPriceAttributes(ev.Attributes) + return NewEventBidClosed(id, price), nil + + case evActionLeaseCreated: + id, err := parseEVLeaseID(ev.Attributes) + if err != nil { + return nil, err + } + price, err := parseEVPriceAttributes(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventLeaseCreated(id, price), nil + case evActionLeaseClosed: + id, err := parseEVLeaseID(ev.Attributes) + if err != nil { + return nil, err + } + // optional price + price, _ := 
parseEVPriceAttributes(ev.Attributes) + return NewEventLeaseClosed(id, price), nil + + default: + return nil, sdkutil.ErrUnknownAction + } +} diff --git a/go/node/market/v1beta5/events_test.go b/go/node/market/v1beta5/events_test.go new file mode 100644 index 00000000..e8bd3f01 --- /dev/null +++ b/go/node/market/v1beta5/events_test.go @@ -0,0 +1,459 @@ +package v1beta5 + +import ( + "errors" + "fmt" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/stretchr/testify/require" + + "github.com/akash-network/akash-api/go/sdkutil" +) + +var ( + errWildcard = errors.New("wildcard string error can't be matched") + evOwnerKey = "owner" + evDSeqKey = "dseq" + evGSeqKey = "gseq" +) + +type testEventParsing struct { + msg sdkutil.Event + expErr error +} + +func (tep testEventParsing) testMessageType() func(t *testing.T) { + _, err := ParseEvent(tep.msg) + return func(t *testing.T) { + // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. 
+ if errors.Is(tep.expErr, errWildcard) { + require.Error(t, err) + } else { + require.Equal(t, tep.expErr, err) + } + } +} + +var TEPS = []testEventParsing{ + { + msg: sdkutil.Event{ + Type: "nil", + }, + expErr: sdkutil.ErrUnknownType, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + }, + expErr: sdkutil.ErrUnknownModule, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + }, + expErr: sdkutil.ErrUnknownAction, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: "nil", + }, + expErr: sdkutil.ErrUnknownModule, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: "nil", + }, + expErr: sdkutil.ErrUnknownAction, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionOrderCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionOrderCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "nooo", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionOrderCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "abc", + }, + }, + }, + expErr: errWildcard, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionOrderClosed, + 
Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + }, + }, + expErr: nil, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionBidCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + { + Key: evProviderKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evPriceDenomKey, + Value: "uakt", + }, + { + Key: evPriceAmountKey, + Value: "23", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionBidCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + { + Key: evProviderKey, + Value: "yesss", + }, + { + Key: evPriceDenomKey, + Value: "uakt", + }, + { + Key: evPriceAmountKey, + Value: "23", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionBidCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + { + Key: evProviderKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evPriceDenomKey, + Value: "uakt", + }, + { + Key: evPriceAmountKey, + Value: "hello", + }, + }, + }, + expErr: errWildcard, + }, + + { + msg: sdkutil.Event{ + Type: 
sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionBidClosed, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + { + Key: evProviderKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evPriceDenomKey, + Value: "uakt", + }, + { + Key: evPriceAmountKey, + Value: "23", + }, + }, + }, + expErr: nil, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionLeaseCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + { + Key: evProviderKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evPriceDenomKey, + Value: "uakt", + }, + { + Key: evPriceAmountKey, + Value: "23", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionLeaseCreated, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + { + Key: evProviderKey, + Value: "hello", + }, + { + Key: evPriceDenomKey, + Value: "uakt", + }, + { + Key: evPriceAmountKey, + Value: "23", + }, + }, + }, + expErr: errWildcard, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: ModuleName, + Action: evActionLeaseClosed, + Attributes: []sdk.Attribute{ + { + Key: evOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evDSeqKey, + Value: "5", + }, + { + Key: evGSeqKey, + Value: "2", + }, + { + Key: evOSeqKey, + Value: "5", + }, + 
{ + Key: evProviderKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + { + Key: evPriceDenomKey, + Value: "uakt", + }, + { + Key: evPriceAmountKey, + Value: "23", + }, + }, + }, + expErr: nil, + }, +} + +func TestEventParsing(t *testing.T) { + for i, test := range TEPS { + t.Run(fmt.Sprintf("%d", i), + test.testMessageType()) + } +} diff --git a/go/node/market/v1beta5/genesis.pb.go b/go/node/market/v1beta5/genesis.pb.go new file mode 100644 index 00000000..40a3235d --- /dev/null +++ b/go/node/market/v1beta5/genesis.pb.go @@ -0,0 +1,518 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/genesis.proto + +package v1beta5 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the basic genesis state used by market module +type GenesisState struct { + Params Params `protobuf:"bytes,1,opt,name=params,proto3" json:"params" yaml:"params"` + Orders []Order `protobuf:"bytes,2,rep,name=orders,proto3" json:"orders" yaml:"orders"` + Leases []Lease `protobuf:"bytes,3,rep,name=leases,proto3" json:"leases" yaml:"leases"` + Bids []Bid `protobuf:"bytes,4,rep,name=bids,proto3" json:"bids" yaml:"bids"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_73efc258394be6e9, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState proto.InternalMessageInfo + +func (m *GenesisState) GetParams() Params { + if m != nil { + return m.Params + } + return Params{} +} + +func (m *GenesisState) GetOrders() []Order { + if m != nil { + return m.Orders + } + return nil +} + +func (m *GenesisState) GetLeases() []Lease { + if m != nil { + return m.Leases + } + return nil +} + +func (m *GenesisState) GetBids() []Bid { + if m != nil { + return m.Bids + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), 
"akash.market.v1beta5.GenesisState") +} + +func init() { + proto.RegisterFile("akash/market/v1beta5/genesis.proto", fileDescriptor_73efc258394be6e9) +} + +var fileDescriptor_73efc258394be6e9 = []byte{ + // 339 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x91, 0xb1, 0x4e, 0xeb, 0x30, + 0x14, 0x86, 0x93, 0xb6, 0xea, 0x90, 0xde, 0xbb, 0x44, 0x1d, 0x42, 0x8b, 0x9c, 0xe2, 0xa9, 0x0b, + 0xb6, 0x28, 0x62, 0x80, 0x31, 0x0b, 0x12, 0x42, 0x02, 0xa5, 0xb0, 0xb0, 0x39, 0xc4, 0x4a, 0xad, + 0x36, 0x75, 0x65, 0x1b, 0x10, 0x6f, 0xc1, 0x63, 0x75, 0xec, 0xc8, 0x42, 0x84, 0xda, 0x8d, 0xb1, + 0x4f, 0x80, 0x62, 0x5b, 0x8a, 0x84, 0xac, 0x6e, 0xf9, 0xf3, 0x7f, 0xe7, 0x4b, 0x8e, 0x1d, 0x40, + 0x32, 0x27, 0x72, 0x86, 0x4b, 0x22, 0xe6, 0x54, 0xe1, 0xd7, 0xb3, 0x8c, 0x2a, 0x72, 0x81, 0x0b, + 0xba, 0xa4, 0x92, 0x49, 0xb4, 0x12, 0x5c, 0xf1, 0xb0, 0xaf, 0x19, 0x64, 0x18, 0x64, 0x99, 0x41, + 0xbf, 0xe0, 0x05, 0xd7, 0x00, 0xae, 0x9f, 0x0c, 0x3b, 0x18, 0x39, 0x7d, 0x5c, 0xe4, 0x54, 0x1c, + 0x24, 0x16, 0x94, 0x48, 0x6a, 0x09, 0xe0, 0x24, 0x32, 0x96, 0xdb, 0xfe, 0xc4, 0xd9, 0xaf, 0x88, + 0x20, 0xa5, 0xfd, 0x65, 0xf8, 0xd5, 0x0a, 0xfe, 0x5d, 0x9b, 0x25, 0xa6, 0x8a, 0x28, 0x1a, 0x3e, + 0x06, 0x5d, 0x03, 0x44, 0xfe, 0xc8, 0x1f, 0xf7, 0x26, 0xc7, 0xc8, 0xb5, 0x14, 0xba, 0xd7, 0x4c, + 0x12, 0xaf, 0xab, 0xd8, 0xfb, 0xa9, 0x62, 0x3b, 0xb3, 0xaf, 0xe2, 0xff, 0xef, 0xa4, 0x5c, 0x5c, + 0x41, 0x93, 0x61, 0x6a, 0x8b, 0xf0, 0x21, 0xe8, 0xea, 0xdd, 0x64, 0xd4, 0x1a, 0xb5, 0xc7, 0xbd, + 0xc9, 0xd0, 0xad, 0xbd, 0xab, 0x99, 0xc6, 0x6a, 0x46, 0x1a, 0xab, 0xc9, 0x30, 0xb5, 0x45, 0x6d, + 0xd5, 0xe7, 0x21, 0xa3, 0xf6, 0x21, 0xeb, 0x6d, 0xcd, 0x34, 0x56, 0x33, 0xd2, 0x58, 0x4d, 0x86, + 0xa9, 0x2d, 0xc2, 0x9b, 0xa0, 0x93, 0xb1, 0x5c, 0x46, 0x1d, 0xed, 0x3c, 0x72, 0x3b, 0x13, 0x96, + 0x27, 0x43, 0x6b, 0xd4, 0xf8, 0xbe, 0x8a, 0x7b, 0xc6, 0x57, 0x27, 0x98, 0xea, 0x97, 0xc9, 0x74, + 0xbd, 0x05, 0xfe, 0x66, 0x0b, 0xfc, 0xef, 0x2d, 0xf0, 0x3f, 0x76, 0xc0, 0xdb, 
0xec, 0x80, 0xf7, + 0xb9, 0x03, 0xde, 0xd3, 0x65, 0xc1, 0xd4, 0xec, 0x25, 0x43, 0xcf, 0xbc, 0xc4, 0xfa, 0x0b, 0xa7, + 0x4b, 0xaa, 0xde, 0xb8, 0x98, 0xdb, 0x44, 0x56, 0x0c, 0x17, 0x1c, 0x2f, 0x79, 0x4e, 0xff, 0xdc, + 0x60, 0xd6, 0xd5, 0x77, 0x77, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, 0xbc, 0x00, 0x05, 0x09, 0x94, + 0x02, 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Bids) > 0 { + for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if len(m.Leases) > 0 { + for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.Orders) > 0 { + for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Params.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + 
dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Params.Size() + n += 1 + l + sovGenesis(uint64(l)) + if len(m.Orders) > 0 { + for _, e := range m.Orders { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + if len(m.Bids) > 0 { + for _, e := range m.Bids { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Params", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Params.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Orders = append(m.Orders, Order{}) + if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, Lease{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bids = append(m.Bids, Bid{}) + if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 
0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/id.go b/go/node/market/v1beta5/id.go new file mode 100644 index 00000000..2d31a4df --- /dev/null +++ b/go/node/market/v1beta5/id.go @@ -0,0 +1,154 @@ +package v1beta5 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta4" +) + +// MakeOrderID returns OrderID instance with provided groupID details and oseq +func MakeOrderID(id dtypes.GroupID, oseq uint32) OrderID { + return OrderID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + OSeq: oseq, + } +} + +// GroupID method returns groupID details for specific order +func (id OrderID) GroupID() dtypes.GroupID { + return dtypes.GroupID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + } +} + +// Equals method compares specific order with provided order +func (id OrderID) Equals(other OrderID) bool { + return id.GroupID().Equals(other.GroupID()) && id.OSeq == other.OSeq +} + +// Validate method for OrderID and returns nil +func (id OrderID) Validate() error { + if err := id.GroupID().Validate(); err != nil { + return sdkerrors.Wrap(err, "OrderID: Invalid GroupID") + } + if id.OSeq == 0 { + return sdkerrors.Wrap(sdkerrors.ErrInvalidSequence, "OrderID: Invalid Order Sequence") + } + return nil +} + +// String provides stringer interface to save reflected formatting. 
+func (id OrderID) String() string { + return fmt.Sprintf("%s/%v", id.GroupID(), id.OSeq) +} + +// MakeBidID returns BidID instance with provided order details and provider +func MakeBidID(id OrderID, provider sdk.AccAddress) BidID { + return BidID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + OSeq: id.OSeq, + Provider: provider.String(), + } +} + +// Equals method compares specific bid with provided bid +func (id BidID) Equals(other BidID) bool { + return id.OrderID().Equals(other.OrderID()) && + id.Provider == other.Provider +} + +// LeaseID method returns lease details of bid +func (id BidID) LeaseID() LeaseID { + return LeaseID(id) +} + +// OrderID method returns OrderID details with specific bid details +func (id BidID) OrderID() OrderID { + return OrderID{ + Owner: id.Owner, + DSeq: id.DSeq, + GSeq: id.GSeq, + OSeq: id.OSeq, + } +} + +// String method for consistent output. +func (id BidID) String() string { + return fmt.Sprintf("%s/%v", id.OrderID(), id.Provider) +} + +// GroupID method returns GroupID details with specific bid details +func (id BidID) GroupID() dtypes.GroupID { + return id.OrderID().GroupID() +} + +// DeploymentID method returns deployment details with specific bid details +func (id BidID) DeploymentID() dtypes.DeploymentID { + return id.GroupID().DeploymentID() +} + +// Validate validates bid instance and returns nil +func (id BidID) Validate() error { + if err := id.OrderID().Validate(); err != nil { + return sdkerrors.Wrap(err, "BidID: Invalid OrderID") + } + if _, err := sdk.AccAddressFromBech32(id.Provider); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "BidID: Invalid Provider Address") + } + if id.Owner == id.Provider { + return sdkerrors.Wrap(sdkerrors.ErrConflict, "BidID: self-bid") + } + return nil +} + +// MakeLeaseID returns LeaseID instance with provided bid details +func MakeLeaseID(id BidID) LeaseID { + return LeaseID(id) +} + +// Equals method compares specific lease with provided lease +func 
(id LeaseID) Equals(other LeaseID) bool { + return id.BidID().Equals(other.BidID()) +} + +// Validate calls the BidID's validator and returns any error. +func (id LeaseID) Validate() error { + if err := id.BidID().Validate(); err != nil { + return sdkerrors.Wrap(err, "LeaseID: Invalid BidID") + } + return nil +} + +// BidID method returns BidID details with specific LeaseID +func (id LeaseID) BidID() BidID { + return BidID(id) +} + +// OrderID method returns OrderID details with specific lease details +func (id LeaseID) OrderID() OrderID { + return id.BidID().OrderID() +} + +// GroupID method returns GroupID details with specific lease details +func (id LeaseID) GroupID() dtypes.GroupID { + return id.OrderID().GroupID() +} + +// DeploymentID method returns deployment details with specific lease details +func (id LeaseID) DeploymentID() dtypes.DeploymentID { + return id.GroupID().DeploymentID() +} + +// String method provides human readable representation of LeaseID. +func (id LeaseID) String() string { + return id.BidID().String() +} diff --git a/go/node/market/v1beta5/key.go b/go/node/market/v1beta5/key.go new file mode 100644 index 00000000..a0b22076 --- /dev/null +++ b/go/node/market/v1beta5/key.go @@ -0,0 +1,28 @@ +package v1beta5 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "market" + + // StoreKey is the store key string for market + StoreKey = ModuleName + + // RouterKey is the message route for market + RouterKey = ModuleName +) + +func OrderPrefix() []byte { + return []byte{0x01, 0x00} +} + +func BidPrefix() []byte { + return []byte{0x02, 0x00} +} + +func LeasePrefix() []byte { + return []byte{0x03, 0x00} +} + +func SecondaryLeasePrefix() []byte { + return []byte{0x03, 0x01} +} diff --git a/go/node/market/v1beta5/lease.pb.go b/go/node/market/v1beta5/lease.pb.go new file mode 100644 index 00000000..cf809bdc --- /dev/null +++ b/go/node/market/v1beta5/lease.pb.go @@ -0,0 +1,2134 @@ +// Code generated by 
protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/lease.proto + +package v1beta5 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// State is an enum which refers to state of lease +type Lease_State int32 + +const ( + // Prefix should start with 0 in enum. So declaring dummy state + LeaseStateInvalid Lease_State = 0 + // LeaseActive denotes state for lease active + LeaseActive Lease_State = 1 + // LeaseInsufficientFunds denotes state for lease insufficient_funds + LeaseInsufficientFunds Lease_State = 2 + // LeaseClosed denotes state for lease closed + LeaseClosed Lease_State = 3 +) + +var Lease_State_name = map[int32]string{ + 0: "invalid", + 1: "active", + 2: "insufficient_funds", + 3: "closed", +} + +var Lease_State_value = map[string]int32{ + "invalid": 0, + "active": 1, + "insufficient_funds": 2, + "closed": 3, +} + +func (x Lease_State) String() string { + return proto.EnumName(Lease_State_name, int32(x)) +} + +func (Lease_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{1, 0} +} + +// LeaseID stores bid details of lease +type LeaseID struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq 
uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` +} + +func (m *LeaseID) Reset() { *m = LeaseID{} } +func (*LeaseID) ProtoMessage() {} +func (*LeaseID) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{0} +} +func (m *LeaseID) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseID) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseID) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseID.Merge(m, src) +} +func (m *LeaseID) XXX_Size() int { + return m.Size() +} +func (m *LeaseID) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseID.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseID proto.InternalMessageInfo + +func (m *LeaseID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *LeaseID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *LeaseID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *LeaseID) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *LeaseID) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +// Lease stores LeaseID, state of lease and price +type Lease struct { + LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` + State Lease_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta5.Lease_State" json:"state" yaml:"state"` + Price types.DecCoin `protobuf:"bytes,3,opt,name=price,proto3" json:"price" yaml:"price"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` + ClosedOn 
int64 `protobuf:"varint,5,opt,name=closed_on,json=closedOn,proto3" json:"closed_on,omitempty"` +} + +func (m *Lease) Reset() { *m = Lease{} } +func (*Lease) ProtoMessage() {} +func (*Lease) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{1} +} +func (m *Lease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Lease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Lease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Lease) XXX_Merge(src proto.Message) { + xxx_messageInfo_Lease.Merge(m, src) +} +func (m *Lease) XXX_Size() int { + return m.Size() +} +func (m *Lease) XXX_DiscardUnknown() { + xxx_messageInfo_Lease.DiscardUnknown(m) +} + +var xxx_messageInfo_Lease proto.InternalMessageInfo + +func (m *Lease) GetLeaseID() LeaseID { + if m != nil { + return m.LeaseID + } + return LeaseID{} +} + +func (m *Lease) GetState() Lease_State { + if m != nil { + return m.State + } + return LeaseStateInvalid +} + +func (m *Lease) GetPrice() types.DecCoin { + if m != nil { + return m.Price + } + return types.DecCoin{} +} + +func (m *Lease) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +func (m *Lease) GetClosedOn() int64 { + if m != nil { + return m.ClosedOn + } + return 0 +} + +// LeaseFilters defines flags for lease list filter +type LeaseFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + Provider string `protobuf:"bytes,5,opt,name=provider,proto3" json:"provider" yaml:"provider"` + State string 
`protobuf:"bytes,6,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *LeaseFilters) Reset() { *m = LeaseFilters{} } +func (m *LeaseFilters) String() string { return proto.CompactTextString(m) } +func (*LeaseFilters) ProtoMessage() {} +func (*LeaseFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{2} +} +func (m *LeaseFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LeaseFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_LeaseFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *LeaseFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_LeaseFilters.Merge(m, src) +} +func (m *LeaseFilters) XXX_Size() int { + return m.Size() +} +func (m *LeaseFilters) XXX_DiscardUnknown() { + xxx_messageInfo_LeaseFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_LeaseFilters proto.InternalMessageInfo + +func (m *LeaseFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *LeaseFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *LeaseFilters) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *LeaseFilters) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *LeaseFilters) GetProvider() string { + if m != nil { + return m.Provider + } + return "" +} + +func (m *LeaseFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +// MsgCreateLease is sent to create a lease +type MsgCreateLease struct { + BidID BidID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCreateLease) Reset() { *m = MsgCreateLease{} } +func (m *MsgCreateLease) String() string { return proto.CompactTextString(m) } +func (*MsgCreateLease) 
ProtoMessage() {} +func (*MsgCreateLease) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{3} +} +func (m *MsgCreateLease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateLease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateLease) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateLease.Merge(m, src) +} +func (m *MsgCreateLease) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateLease) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateLease.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateLease proto.InternalMessageInfo + +func (m *MsgCreateLease) GetBidID() BidID { + if m != nil { + return m.BidID + } + return BidID{} +} + +// MsgCreateLeaseResponse is the response from creating a lease +type MsgCreateLeaseResponse struct { +} + +func (m *MsgCreateLeaseResponse) Reset() { *m = MsgCreateLeaseResponse{} } +func (m *MsgCreateLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateLeaseResponse) ProtoMessage() {} +func (*MsgCreateLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{4} +} +func (m *MsgCreateLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateLeaseResponse.Merge(m, src) +} +func (m *MsgCreateLeaseResponse) XXX_Size() int { + 
return m.Size() +} +func (m *MsgCreateLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateLeaseResponse proto.InternalMessageInfo + +// MsgWithdrawLease defines an SDK message for closing bid +type MsgWithdrawLease struct { + LeaseID LeaseID `protobuf:"bytes,1,opt,name=bid_id,json=bidId,proto3" json:"id" yaml:"id"` +} + +func (m *MsgWithdrawLease) Reset() { *m = MsgWithdrawLease{} } +func (m *MsgWithdrawLease) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawLease) ProtoMessage() {} +func (*MsgWithdrawLease) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{5} +} +func (m *MsgWithdrawLease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawLease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawLease) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawLease.Merge(m, src) +} +func (m *MsgWithdrawLease) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawLease) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawLease.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawLease proto.InternalMessageInfo + +func (m *MsgWithdrawLease) GetLeaseID() LeaseID { + if m != nil { + return m.LeaseID + } + return LeaseID{} +} + +// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. 
+type MsgWithdrawLeaseResponse struct { +} + +func (m *MsgWithdrawLeaseResponse) Reset() { *m = MsgWithdrawLeaseResponse{} } +func (m *MsgWithdrawLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*MsgWithdrawLeaseResponse) ProtoMessage() {} +func (*MsgWithdrawLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{6} +} +func (m *MsgWithdrawLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgWithdrawLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgWithdrawLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgWithdrawLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgWithdrawLeaseResponse.Merge(m, src) +} +func (m *MsgWithdrawLeaseResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgWithdrawLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgWithdrawLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgWithdrawLeaseResponse proto.InternalMessageInfo + +// MsgCloseLease defines an SDK message for closing order +type MsgCloseLease struct { + LeaseID LeaseID `protobuf:"bytes,1,opt,name=lease_id,json=leaseId,proto3" json:"id" yaml:"id"` +} + +func (m *MsgCloseLease) Reset() { *m = MsgCloseLease{} } +func (m *MsgCloseLease) String() string { return proto.CompactTextString(m) } +func (*MsgCloseLease) ProtoMessage() {} +func (*MsgCloseLease) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{7} +} +func (m *MsgCloseLease) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseLease) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseLease.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := 
m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseLease) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseLease.Merge(m, src) +} +func (m *MsgCloseLease) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseLease) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseLease.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseLease proto.InternalMessageInfo + +func (m *MsgCloseLease) GetLeaseID() LeaseID { + if m != nil { + return m.LeaseID + } + return LeaseID{} +} + +// MsgCloseLeaseResponse defines the Msg/CloseLease response type. +type MsgCloseLeaseResponse struct { +} + +func (m *MsgCloseLeaseResponse) Reset() { *m = MsgCloseLeaseResponse{} } +func (m *MsgCloseLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCloseLeaseResponse) ProtoMessage() {} +func (*MsgCloseLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_c206e3cb96afe42c, []int{8} +} +func (m *MsgCloseLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCloseLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCloseLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCloseLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCloseLeaseResponse.Merge(m, src) +} +func (m *MsgCloseLeaseResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCloseLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCloseLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCloseLeaseResponse proto.InternalMessageInfo + +func init() { + proto.RegisterEnum("akash.market.v1beta5.Lease_State", Lease_State_name, Lease_State_value) + proto.RegisterType((*LeaseID)(nil), "akash.market.v1beta5.LeaseID") + proto.RegisterType((*Lease)(nil), 
"akash.market.v1beta5.Lease") + proto.RegisterType((*LeaseFilters)(nil), "akash.market.v1beta5.LeaseFilters") + proto.RegisterType((*MsgCreateLease)(nil), "akash.market.v1beta5.MsgCreateLease") + proto.RegisterType((*MsgCreateLeaseResponse)(nil), "akash.market.v1beta5.MsgCreateLeaseResponse") + proto.RegisterType((*MsgWithdrawLease)(nil), "akash.market.v1beta5.MsgWithdrawLease") + proto.RegisterType((*MsgWithdrawLeaseResponse)(nil), "akash.market.v1beta5.MsgWithdrawLeaseResponse") + proto.RegisterType((*MsgCloseLease)(nil), "akash.market.v1beta5.MsgCloseLease") + proto.RegisterType((*MsgCloseLeaseResponse)(nil), "akash.market.v1beta5.MsgCloseLeaseResponse") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/lease.proto", fileDescriptor_c206e3cb96afe42c) } + +var fileDescriptor_c206e3cb96afe42c = []byte{ + // 757 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x55, 0x4f, 0x4f, 0xdb, 0x4a, + 0x10, 0xb7, 0x93, 0x38, 0x24, 0x1b, 0xfe, 0xe4, 0x59, 0xc0, 0xcb, 0x33, 0x0f, 0xdb, 0xf5, 0x89, + 0x4b, 0x6d, 0x11, 0xd4, 0x43, 0xe9, 0x89, 0x10, 0x51, 0x45, 0x2a, 0x42, 0x35, 0x95, 0x5a, 0x55, + 0x95, 0x22, 0xc7, 0xbb, 0x98, 0x15, 0x89, 0x37, 0x78, 0x4d, 0x50, 0xbf, 0x41, 0xc5, 0xa9, 0xc7, + 0x5e, 0x50, 0x91, 0xfa, 0x65, 0x38, 0xa2, 0x9e, 0x7a, 0xb2, 0xaa, 0x70, 0xa9, 0x72, 0x8c, 0xfa, + 0x01, 0xaa, 0xdd, 0x75, 0x48, 0x82, 0x22, 0x4e, 0x55, 0x4f, 0x3d, 0x25, 0xf3, 0x9b, 0xf9, 0xcd, + 0x8c, 0x7f, 0x33, 0x63, 0x03, 0xd3, 0x3b, 0xf1, 0xe8, 0xb1, 0xd3, 0xf1, 0xa2, 0x13, 0x14, 0x3b, + 0xbd, 0xcd, 0x16, 0x8a, 0xbd, 0x27, 0x4e, 0x1b, 0x79, 0x14, 0xd9, 0xdd, 0x88, 0xc4, 0x44, 0x5d, + 0xe6, 0x11, 0xb6, 0x88, 0xb0, 0xd3, 0x08, 0x6d, 0x39, 0x20, 0x01, 0xe1, 0x01, 0x0e, 0xfb, 0x27, + 0x62, 0x35, 0xdd, 0x27, 0xb4, 0x43, 0xa8, 0xd3, 0xf2, 0x28, 0x4a, 0x93, 0x6d, 0x3a, 0x3e, 0xc1, + 0xe1, 0xc8, 0x3f, 0xb3, 0x5a, 0x0b, 0x43, 0xe1, 0xb7, 0xae, 0x32, 0x60, 0xee, 0x05, 0xab, 0xdd, + 0xa8, 0xab, 0x0e, 0x50, 0xc8, 0x79, 0x88, 
0xa2, 0x8a, 0x6c, 0xca, 0x1b, 0xc5, 0xda, 0x7f, 0x83, + 0xc4, 0x10, 0xc0, 0x30, 0x31, 0xe6, 0xdf, 0x7b, 0x9d, 0xf6, 0xb6, 0xc5, 0x4d, 0xcb, 0x15, 0xb0, + 0xba, 0x05, 0x72, 0x90, 0xa2, 0xd3, 0x4a, 0xc6, 0x94, 0x37, 0x72, 0x35, 0xa3, 0x9f, 0x18, 0xb9, + 0xfa, 0x21, 0x3a, 0x1d, 0x24, 0x06, 0xc7, 0x87, 0x89, 0x51, 0x12, 0x34, 0x66, 0x59, 0x2e, 0x07, + 0x19, 0x29, 0x60, 0xa4, 0xac, 0x29, 0x6f, 0x2c, 0x08, 0xd2, 0xf3, 0x94, 0x14, 0x4c, 0x91, 0x02, + 0x41, 0x0a, 0x52, 0x12, 0x61, 0xa4, 0xdc, 0x98, 0x74, 0x90, 0x92, 0xc8, 0x14, 0x89, 0x08, 0x12, + 0xfb, 0x51, 0x9f, 0x81, 0x42, 0x37, 0x22, 0x3d, 0x0c, 0x51, 0x54, 0x51, 0xf8, 0x23, 0x19, 0x83, + 0xc4, 0xb8, 0xc3, 0x86, 0x89, 0xb1, 0x24, 0x48, 0x23, 0xc4, 0x72, 0xef, 0x9c, 0xdb, 0x85, 0x4f, + 0x57, 0x86, 0xf4, 0xe3, 0xca, 0x90, 0xac, 0x9f, 0x59, 0xa0, 0x70, 0x89, 0xd4, 0x77, 0xa0, 0xc0, + 0xe7, 0xd4, 0xc4, 0x90, 0x6b, 0x54, 0xaa, 0xae, 0xdb, 0xb3, 0x66, 0x65, 0xa7, 0x8a, 0xd6, 0xac, + 0xeb, 0xc4, 0x90, 0xfa, 0x89, 0x31, 0x92, 0x78, 0x90, 0x18, 0x19, 0x0c, 0x87, 0x89, 0x51, 0x14, + 0x85, 0x31, 0xb4, 0xdc, 0x39, 0x9e, 0xb2, 0x01, 0x55, 0x17, 0x28, 0x34, 0xf6, 0x62, 0xc4, 0xe5, + 0x5c, 0xac, 0x3e, 0x7a, 0x20, 0xb5, 0x7d, 0xc8, 0x02, 0xc5, 0x84, 0x38, 0x67, 0x3c, 0x21, 0x6e, + 0x5a, 0xae, 0x80, 0xd5, 0x97, 0x40, 0xe9, 0x46, 0xd8, 0x47, 0x5c, 0xed, 0x52, 0xf5, 0x7f, 0x5b, + 0xac, 0x8b, 0xcd, 0xd6, 0x25, 0x4d, 0xb9, 0x69, 0xd7, 0x91, 0xbf, 0x4b, 0x70, 0x58, 0x5b, 0x67, + 0xdd, 0xb2, 0x94, 0x9c, 0x32, 0x4e, 0xc9, 0x4d, 0xcb, 0x15, 0xb0, 0xba, 0x0e, 0x80, 0x1f, 0x21, + 0x2f, 0x46, 0xb0, 0xe9, 0xc5, 0x7c, 0x20, 0x59, 0xb7, 0x98, 0x22, 0x3b, 0xb1, 0xba, 0x06, 0x8a, + 0x7e, 0x9b, 0x50, 0x04, 0x9b, 0x24, 0xe4, 0xaa, 0x67, 0xdd, 0x82, 0x00, 0x0e, 0x42, 0xeb, 0xb3, + 0x0c, 0x14, 0xde, 0xba, 0x6a, 0x81, 0x39, 0x1c, 0xf6, 0xbc, 0x36, 0x86, 0x65, 0x49, 0x5b, 0xb9, + 0xb8, 0x34, 0xff, 0xe1, 0x0f, 0xc6, 0x9d, 0x0d, 0xe1, 0x50, 0xd7, 0x40, 0xde, 0xf3, 0x63, 0xdc, + 0x43, 0x65, 0x59, 0x5b, 0xba, 0xb8, 0x34, 0x4b, 0x3c, 0x64, 0x87, 0x43, 0x6a, 
0x15, 0xa8, 0x38, + 0xa4, 0x67, 0x47, 0x47, 0xd8, 0xc7, 0x28, 0x8c, 0x9b, 0x47, 0x67, 0x21, 0xa4, 0xe5, 0x8c, 0xa6, + 0x5d, 0x5c, 0x9a, 0xab, 0x42, 0xee, 0x09, 0xf7, 0x1e, 0xf3, 0xb2, 0x84, 0xa2, 0x95, 0x72, 0x76, + 0x22, 0xe1, 0x2e, 0x87, 0xb4, 0xdc, 0x87, 0x2f, 0xba, 0x34, 0x31, 0xf6, 0xaf, 0x19, 0x30, 0xcf, + 0xfd, 0x7b, 0xb8, 0x1d, 0xa3, 0x88, 0xfe, 0x3d, 0x8f, 0x89, 0xf3, 0x60, 0x62, 0x88, 0x65, 0xcd, + 0x8f, 0xc5, 0x78, 0x68, 0x13, 0xb7, 0x73, 0x5c, 0xd4, 0x36, 0x58, 0xdc, 0xa7, 0xc1, 0x2e, 0xdf, + 0x16, 0x71, 0x53, 0xaf, 0x40, 0xbe, 0x85, 0xe1, 0xf8, 0xa2, 0xd6, 0x66, 0xaf, 0x7d, 0x0d, 0xc3, + 0x46, 0xbd, 0x66, 0xa6, 0xf7, 0xa4, 0x70, 0x73, 0xd6, 0x35, 0x29, 0x2d, 0x0c, 0x1b, 0x30, 0xad, + 0x56, 0x01, 0xab, 0xd3, 0xd5, 0x5c, 0x44, 0xbb, 0x24, 0xa4, 0xc8, 0x8a, 0x40, 0x79, 0x9f, 0x06, + 0xaf, 0x71, 0x7c, 0x0c, 0x23, 0xef, 0x5c, 0x74, 0xf2, 0xe6, 0x5e, 0x27, 0xbf, 0xe1, 0xb6, 0xa7, + 0xba, 0xd1, 0x40, 0xe5, 0x7e, 0xcd, 0xbb, 0x7e, 0x28, 0x58, 0x60, 0x9d, 0xb2, 0x4d, 0xfc, 0x03, + 0xaf, 0x9a, 0xb4, 0xa1, 0x7f, 0xc1, 0xca, 0x54, 0xd1, 0x51, 0x37, 0xb5, 0xc3, 0xeb, 0xbe, 0x2e, + 0xdf, 0xf4, 0x75, 0xf9, 0x7b, 0x5f, 0x97, 0x3f, 0xde, 0xea, 0xd2, 0xcd, 0xad, 0x2e, 0x7d, 0xbb, + 0xd5, 0xa5, 0xb7, 0x4f, 0x03, 0x1c, 0x1f, 0x9f, 0xb5, 0x6c, 0x9f, 0x74, 0x1c, 0xde, 0xce, 0xe3, + 0x10, 0xc5, 0xe7, 0x24, 0x3a, 0x49, 0x2d, 0xaf, 0x8b, 0x9d, 0x80, 0x38, 0x21, 0x81, 0xe8, 0xde, + 0x37, 0xa7, 0x95, 0xe7, 0x1f, 0x9c, 0xad, 0x5f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x11, 0xc3, 0x3a, + 0x00, 0x00, 0x07, 0x00, 0x00, +} + +func (m *LeaseID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LeaseID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if 
len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Lease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Lease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Lease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.ClosedOn != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.ClosedOn)) + i-- + dAtA[i] = 0x28 + } + if m.CreatedAt != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Price.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLease(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLease(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *LeaseFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], 
nil +} + +func (m *LeaseFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LeaseFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintLease(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x32 + } + if len(m.Provider) > 0 { + i -= len(m.Provider) + copy(dAtA[i:], m.Provider) + i = encodeVarintLease(dAtA, i, uint64(len(m.Provider))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintLease(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintLease(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateLease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateLease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.BidID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLease(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCreateLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m 
*MsgCreateLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawLease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawLease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLease(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgWithdrawLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgWithdrawLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgWithdrawLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgCloseLease) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseLease) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseLease) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + { + size, err := m.LeaseID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintLease(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *MsgCloseLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCloseLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCloseLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func encodeVarintLease(dAtA []byte, offset int, v uint64) int { + offset -= sovLease(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *LeaseID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovLease(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovLease(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovLease(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovLease(uint64(m.OSeq)) + } + l = len(m.Provider) + if l > 0 { + n += 1 + l + sovLease(uint64(l)) + } + return n +} + +func (m *Lease) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.LeaseID.Size() + n += 1 + l + sovLease(uint64(l)) + if m.State != 0 { + n += 1 + sovLease(uint64(m.State)) + } + l = m.Price.Size() + n += 1 + l + sovLease(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovLease(uint64(m.CreatedAt)) + } + if m.ClosedOn != 0 { + n += 1 + sovLease(uint64(m.ClosedOn)) + } + return n +} + +func (m *LeaseFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovLease(uint64(l)) + } + if 
// sovLease returns the number of bytes needed to encode x as a protobuf
// varint: one byte per 7 payload bits. The x|1 guarantees a minimum of one
// byte for x == 0.
func sovLease(x uint64) (n int) {
	width := math_bits.Len64(x | 1)
	return (width + 6) / 7
}

// sozLease returns the varint size of x after zig-zag encoding
// (sint64 wire representation).
func sozLease(x uint64) (n int) {
	zigzag := (x << 1) ^ uint64(int64(x)>>63)
	return sovLease(zigzag)
}
fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Lease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Lease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Lease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > 
l { + return io.ErrUnexpectedEOF + } + if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Lease_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Price", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Price.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ClosedOn", wireType) + } + m.ClosedOn = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ClosedOn |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if 
err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LeaseFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LeaseFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LeaseFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType 
!= 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Provider = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateLease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateLease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateLease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BidID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BidID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 
skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateLeaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawLease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawLease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawLease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgWithdrawLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgWithdrawLeaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgWithdrawLeaseResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseLease) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var 
wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCloseLease: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCloseLease: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field LeaseID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthLease + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthLease + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.LeaseID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipLease(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthLease + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCloseLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowLease + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) 
// skipLease advances over one complete protobuf field (including nested
// groups) starting at dAtA[0] and returns the number of bytes consumed.
// It is used by the generated Unmarshal methods to discard unknown fields.
func skipLease(dAtA []byte) (n int, err error) {
	l := len(dAtA)
	iNdEx := 0
	depth := 0 // group nesting level; 0 means we are at the top field
	for iNdEx < l {
		// decode the field tag (field number + wire type) as a varint
		var wire uint64
		for shift := uint(0); ; shift += 7 {
			if shift >= 64 {
				return 0, ErrIntOverflowLease
			}
			if iNdEx >= l {
				return 0, io.ErrUnexpectedEOF
			}
			b := dAtA[iNdEx]
			iNdEx++
			wire |= (uint64(b) & 0x7F) << shift
			if b < 0x80 {
				break
			}
		}
		wireType := int(wire & 0x7)
		switch wireType {
		case 0:
			// varint: skip bytes until the continuation bit clears
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowLease
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				iNdEx++
				if dAtA[iNdEx-1] < 0x80 {
					break
				}
			}
		case 1:
			// fixed64
			iNdEx += 8
		case 2:
			// length-delimited: read the length varint, then skip the payload
			var length int
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return 0, ErrIntOverflowLease
				}
				if iNdEx >= l {
					return 0, io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				length |= (int(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			if length < 0 {
				return 0, ErrInvalidLengthLease
			}
			iNdEx += length
		case 3:
			// start-group
			depth++
		case 4:
			// end-group; must match an earlier start-group
			if depth == 0 {
				return 0, ErrUnexpectedEndOfGroupLease
			}
			depth--
		case 5:
			// fixed32
			iNdEx += 4
		default:
			return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
		}
		// iNdEx can go negative on overflowed length arithmetic above
		if iNdEx < 0 {
			return 0, ErrInvalidLengthLease
		}
		if depth == 0 {
			return iNdEx, nil
		}
	}
	return 0, io.ErrUnexpectedEOF
}
negative length found during unmarshaling") + ErrIntOverflowLease = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupLease = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/migrate/v1beta4.go b/go/node/market/v1beta5/migrate/v1beta4.go new file mode 100644 index 00000000..2f80caf0 --- /dev/null +++ b/go/node/market/v1beta5/migrate/v1beta4.go @@ -0,0 +1,40 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/market/v1beta4" + "github.com/akash-network/akash-api/go/node/market/v1beta5" +) + +func BidStateFromV1beta4(from v1beta4.Bid_State) v1beta5.Bid_State { + return v1beta5.Bid_State(from) +} + +func LeaseIDFromV1beta4(from v1beta4.LeaseID) v1beta5.LeaseID { + return v1beta5.LeaseID{ + Owner: from.Owner, + DSeq: from.DSeq, + GSeq: from.GSeq, + OSeq: from.OSeq, + Provider: from.Provider, + } +} + +func BidIDFromV1beta4(from v1beta4.BidID) v1beta5.BidID { + return v1beta5.BidID{ + Owner: from.Owner, + DSeq: from.DSeq, + GSeq: from.GSeq, + OSeq: from.OSeq, + Provider: from.Provider, + } +} + +func BidFromV1beta3(from v1beta4.Bid) v1beta5.Bid { + return v1beta5.Bid{ + BidID: BidIDFromV1beta4(from.BidID), + State: BidStateFromV1beta4(from.State), + Price: from.Price, + CreatedAt: from.CreatedAt, + ResourcesOffer: v1beta5.ResourcesOffer{}, + } +} diff --git a/go/node/market/v1beta5/msgs.go b/go/node/market/v1beta5/msgs.go new file mode 100644 index 00000000..a272d4e1 --- /dev/null +++ b/go/node/market/v1beta5/msgs.go @@ -0,0 +1,217 @@ +package v1beta5 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +const ( + MsgTypeCreateBid = "create-bid" + MsgTypeCloseBid = "close-bid" + MsgTypeCreateLease = "create-lease" + MsgTypeWithdrawLease = "withdraw-lease" + MsgTypeCloseLease = "close-lease" +) + +var ( + _ sdk.Msg = &MsgCreateBid{} + _ sdk.Msg = &MsgCloseBid{} + _ sdk.Msg = &MsgCreateLease{} + _ sdk.Msg = &MsgWithdrawLease{} + _ sdk.Msg = &MsgCloseLease{} +) + +// NewMsgCreateBid 
creates a new MsgCreateBid instance +func NewMsgCreateBid(id OrderID, provider sdk.AccAddress, price sdk.DecCoin, deposit sdk.Coin, roffer ResourcesOffer) *MsgCreateBid { + return &MsgCreateBid{ + Order: id, + Provider: provider.String(), + Price: price, + Deposit: deposit, + ResourcesOffer: roffer, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgCreateBid) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgCreateBid) Type() string { return MsgTypeCreateBid } + +// GetSignBytes encodes the message for signing +func (msg MsgCreateBid) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgCreateBid) GetSigners() []sdk.AccAddress { + provider, err := sdk.AccAddressFromBech32(msg.Provider) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{provider} +} + +// ValidateBasic does basic validation of provider and order +func (msg MsgCreateBid) ValidateBasic() error { + if err := msg.Order.Validate(); err != nil { + return err + } + + provider, err := sdk.AccAddressFromBech32(msg.Provider) + if err != nil { + return ErrEmptyProvider + } + + owner, err := sdk.AccAddressFromBech32(msg.Order.Owner) + if err != nil { + return fmt.Errorf("%w: empty owner", ErrInvalidBid) + } + + if provider.Equals(owner) { + return ErrSameAccount + } + + if msg.Price.IsZero() { + return ErrBidZeroPrice + } + + return nil +} + +// NewMsgWithdrawLease creates a new MsgWithdrawLease instance +func NewMsgWithdrawLease(id LeaseID) *MsgWithdrawLease { + return &MsgWithdrawLease{ + LeaseID: id, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgWithdrawLease) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgWithdrawLease) Type() string { return MsgTypeWithdrawLease } + +// GetSignBytes encodes the message for signing +func (msg MsgWithdrawLease) GetSignBytes() []byte { + 
return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgWithdrawLease) GetSigners() []sdk.AccAddress { + provider, err := sdk.AccAddressFromBech32(msg.GetLeaseID().Provider) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{provider} +} + +// ValidateBasic does basic validation of provider and order +func (msg MsgWithdrawLease) ValidateBasic() error { + if err := msg.LeaseID.Validate(); err != nil { + return err + } + return nil +} + +// NewMsgCreateLease creates a new MsgCreateLease instance +func NewMsgCreateLease(id BidID) *MsgCreateLease { + return &MsgCreateLease{ + BidID: id, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgCreateLease) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgCreateLease) Type() string { return MsgTypeCreateLease } + +// GetSignBytes encodes the message for signing +func (msg MsgCreateLease) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgCreateLease) GetSigners() []sdk.AccAddress { + provider, err := sdk.AccAddressFromBech32(msg.BidID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{provider} +} + +// ValidateBasic method for MsgCreateLease +func (msg MsgCreateLease) ValidateBasic() error { + return msg.BidID.Validate() +} + +// NewMsgCloseBid creates a new MsgCloseBid instance +func NewMsgCloseBid(id BidID) *MsgCloseBid { + return &MsgCloseBid{ + BidID: id, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgCloseBid) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgCloseBid) Type() string { return MsgTypeCloseBid } + +// GetSignBytes encodes the message for signing +func (msg MsgCloseBid) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose 
signature is required +func (msg MsgCloseBid) GetSigners() []sdk.AccAddress { + provider, err := sdk.AccAddressFromBech32(msg.BidID.Provider) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{provider} +} + +// ValidateBasic method for MsgCloseBid +func (msg MsgCloseBid) ValidateBasic() error { + return msg.BidID.Validate() +} + +// NewMsgCloseLease creates a new MsgCloseLease instance +func NewMsgCloseLease(id LeaseID) *MsgCloseLease { + return &MsgCloseLease{ + LeaseID: id, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgCloseLease) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgCloseLease) Type() string { return MsgTypeCloseLease } + +// GetSignBytes encodes the message for signing +func (msg MsgCloseLease) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgCloseLease) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.LeaseID.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// ValidateBasic method for MsgCloseLease +func (msg MsgCloseLease) ValidateBasic() error { + return msg.LeaseID.Validate() +} diff --git a/go/node/market/v1beta5/order.pb.go b/go/node/market/v1beta5/order.pb.go new file mode 100644 index 00000000..a9fae90e --- /dev/null +++ b/go/node/market/v1beta5/order.pb.go @@ -0,0 +1,1107 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/order.proto + +package v1beta5 + +import ( + fmt "fmt" + v1beta4 "github.com/akash-network/akash-api/go/node/deployment/v1beta4" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
// State is an enum which refers to state of order
type Order_State int32

// NOTE(review): this is a generated file — the two stale comments below
// (fixed here) should also be corrected in akash/market/v1beta5/order.proto,
// which is their source of truth.
const (
	// OrderStateInvalid is a dummy zero value; proto3 enums must start at 0.
	OrderStateInvalid Order_State = 0
	// OrderOpen denotes state for order open
	OrderOpen Order_State = 1
	// OrderActive denotes state for order active (proto name "active")
	OrderActive Order_State = 2
	// OrderClosed denotes state for order closed (proto name "closed")
	OrderClosed Order_State = 3
)

// Order_State_name maps enum numbers to their proto string names
// (used by String()).
var Order_State_name = map[int32]string{
	0: "invalid",
	1: "open",
	2: "active",
	3: "closed",
}

// Order_State_value is the reverse mapping of Order_State_name.
var Order_State_value = map[string]int32{
	"invalid": 0,
	"open":    1,
	"active":  2,
	"closed":  3,
}

// String returns the proto string name of the state.
func (x Order_State) String() string {
	return proto.EnumName(Order_State_name, int32(x))
}

func (Order_State) EnumDescriptor() ([]byte, []int) {
	return fileDescriptor_a72454f2c693d67f, []int{1, 0}
}
deterministic { + return xxx_messageInfo_OrderID.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OrderID) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderID.Merge(m, src) +} +func (m *OrderID) XXX_Size() int { + return m.Size() +} +func (m *OrderID) XXX_DiscardUnknown() { + xxx_messageInfo_OrderID.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderID proto.InternalMessageInfo + +func (m *OrderID) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *OrderID) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *OrderID) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *OrderID) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +// Order stores orderID, state of order and other details +type Order struct { + OrderID OrderID `protobuf:"bytes,1,opt,name=order_id,json=orderId,proto3" json:"id" yaml:"id"` + State Order_State `protobuf:"varint,2,opt,name=state,proto3,enum=akash.market.v1beta5.Order_State" json:"state" yaml:"state"` + Spec v1beta4.GroupSpec `protobuf:"bytes,3,opt,name=spec,proto3" json:"spec" yaml:"spec"` + CreatedAt int64 `protobuf:"varint,4,opt,name=created_at,json=createdAt,proto3" json:"created_at,omitempty"` +} + +func (m *Order) Reset() { *m = Order{} } +func (*Order) ProtoMessage() {} +func (*Order) Descriptor() ([]byte, []int) { + return fileDescriptor_a72454f2c693d67f, []int{1} +} +func (m *Order) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Order) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Order.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Order) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Order.Merge(m, src) +} +func (m *Order) XXX_Size() int { + return m.Size() +} +func (m *Order) XXX_DiscardUnknown() { + xxx_messageInfo_Order.DiscardUnknown(m) +} + +var xxx_messageInfo_Order proto.InternalMessageInfo + +func (m *Order) GetOrderID() OrderID { + if m != nil { + return m.OrderID + } + return OrderID{} +} + +func (m *Order) GetState() Order_State { + if m != nil { + return m.State + } + return OrderStateInvalid +} + +func (m *Order) GetSpec() v1beta4.GroupSpec { + if m != nil { + return m.Spec + } + return v1beta4.GroupSpec{} +} + +func (m *Order) GetCreatedAt() int64 { + if m != nil { + return m.CreatedAt + } + return 0 +} + +// OrderFilters defines flags for order list filter +type OrderFilters struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + DSeq uint64 `protobuf:"varint,2,opt,name=dseq,proto3" json:"dseq" yaml:"dseq"` + GSeq uint32 `protobuf:"varint,3,opt,name=gseq,proto3" json:"gseq" yaml:"gseq"` + OSeq uint32 `protobuf:"varint,4,opt,name=oseq,proto3" json:"oseq" yaml:"oseq"` + State string `protobuf:"bytes,5,opt,name=state,proto3" json:"state" yaml:"state"` +} + +func (m *OrderFilters) Reset() { *m = OrderFilters{} } +func (m *OrderFilters) String() string { return proto.CompactTextString(m) } +func (*OrderFilters) ProtoMessage() {} +func (*OrderFilters) Descriptor() ([]byte, []int) { + return fileDescriptor_a72454f2c693d67f, []int{2} +} +func (m *OrderFilters) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *OrderFilters) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_OrderFilters.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *OrderFilters) XXX_Merge(src proto.Message) { + xxx_messageInfo_OrderFilters.Merge(m, src) +} +func (m *OrderFilters) XXX_Size() int { + return m.Size() +} 
+func (m *OrderFilters) XXX_DiscardUnknown() { + xxx_messageInfo_OrderFilters.DiscardUnknown(m) +} + +var xxx_messageInfo_OrderFilters proto.InternalMessageInfo + +func (m *OrderFilters) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *OrderFilters) GetDSeq() uint64 { + if m != nil { + return m.DSeq + } + return 0 +} + +func (m *OrderFilters) GetGSeq() uint32 { + if m != nil { + return m.GSeq + } + return 0 +} + +func (m *OrderFilters) GetOSeq() uint32 { + if m != nil { + return m.OSeq + } + return 0 +} + +func (m *OrderFilters) GetState() string { + if m != nil { + return m.State + } + return "" +} + +func init() { + proto.RegisterEnum("akash.market.v1beta5.Order_State", Order_State_name, Order_State_value) + proto.RegisterType((*OrderID)(nil), "akash.market.v1beta5.OrderID") + proto.RegisterType((*Order)(nil), "akash.market.v1beta5.Order") + proto.RegisterType((*OrderFilters)(nil), "akash.market.v1beta5.OrderFilters") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/order.proto", fileDescriptor_a72454f2c693d67f) } + +var fileDescriptor_a72454f2c693d67f = []byte{ + // 588 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xb1, 0x6b, 0xdb, 0x4e, + 0x14, 0x96, 0x6c, 0x39, 0x89, 0xcf, 0xc9, 0xef, 0xe7, 0x8a, 0x94, 0xa6, 0x0a, 0xd1, 0xa9, 0xea, + 0xe2, 0xa5, 0x12, 0x75, 0xd2, 0xa1, 0xde, 0xe2, 0x86, 0x06, 0x4f, 0x06, 0xb9, 0x53, 0x29, 0x04, + 0x59, 0x77, 0x28, 0xc2, 0xb6, 0x4e, 0x91, 0x2e, 0x0e, 0xd9, 0x3b, 0x14, 0x4f, 0x5d, 0x0a, 0x5d, + 0x0c, 0x81, 0xfe, 0x21, 0x5d, 0x33, 0x66, 0xec, 0x24, 0x8a, 0xbd, 0x14, 0x8f, 0xfe, 0x0b, 0xca, + 0xbd, 0x53, 0x70, 0x52, 0x4a, 0xfe, 0x80, 0x4e, 0xd2, 0xfb, 0xde, 0xf7, 0xbd, 0xbb, 0xf7, 0xbd, + 0xc7, 0x21, 0xcb, 0x1f, 0xf8, 0xd9, 0xa9, 0x3b, 0xf2, 0xd3, 0x01, 0xe5, 0xee, 0xf8, 0x65, 0x9f, + 0x72, 0xff, 0x95, 0xcb, 0x52, 0x42, 0x53, 0x27, 0x49, 0x19, 0x67, 0xfa, 0x36, 0x30, 0x1c, 0xc9, + 0x70, 0x0a, 0x86, 0xb1, 0x1d, 0xb2, 
0x90, 0x01, 0xc1, 0x15, 0x7f, 0x92, 0x6b, 0x34, 0x64, 0x35, + 0x42, 0x93, 0x21, 0xbb, 0x1c, 0xd1, 0xf8, 0xb6, 0xe2, 0x81, 0x1b, 0xa6, 0xec, 0x3c, 0xc9, 0x12, + 0x1a, 0x48, 0xa6, 0x3d, 0x57, 0xd1, 0x7a, 0x57, 0x9c, 0xd2, 0x39, 0xd2, 0x5d, 0x54, 0x61, 0x17, + 0x31, 0x4d, 0x77, 0x54, 0x4b, 0x6d, 0x54, 0xdb, 0x4f, 0x17, 0x39, 0x96, 0xc0, 0x32, 0xc7, 0x9b, + 0x97, 0xfe, 0x68, 0xd8, 0xb2, 0x21, 0xb4, 0x3d, 0x09, 0xeb, 0xfb, 0x48, 0x23, 0x19, 0x3d, 0xdb, + 0x29, 0x59, 0x6a, 0x43, 0x6b, 0xe3, 0x59, 0x8e, 0xb5, 0xa3, 0x1e, 0x3d, 0x5b, 0xe4, 0x18, 0xf0, + 0x65, 0x8e, 0x6b, 0x52, 0x26, 0x22, 0xdb, 0x03, 0x50, 0x88, 0x42, 0x21, 0x2a, 0x5b, 0x6a, 0x63, + 0x4b, 0x8a, 0x8e, 0x0b, 0x51, 0x78, 0x4f, 0x14, 0x4a, 0x51, 0x58, 0x88, 0x98, 0x10, 0x69, 0x2b, + 0x51, 0xb7, 0x10, 0xb1, 0x7b, 0x22, 0x26, 0x45, 0xe2, 0xd3, 0xda, 0xf8, 0x7a, 0x85, 0x95, 0x5f, + 0x57, 0x58, 0xb1, 0xbf, 0x97, 0x51, 0x05, 0xba, 0xd4, 0x3f, 0xa0, 0x0d, 0x30, 0xf5, 0x24, 0x22, + 0xd0, 0x66, 0xad, 0xb9, 0xe7, 0xfc, 0xcd, 0x58, 0xa7, 0x30, 0xa5, 0x6d, 0x5f, 0xe7, 0x58, 0x99, + 0xe5, 0xf8, 0xd6, 0xa5, 0x45, 0x8e, 0x4b, 0x11, 0x59, 0xe6, 0xb8, 0x2a, 0x0f, 0x8c, 0x88, 0xed, + 0xad, 0x43, 0xc9, 0x0e, 0xd1, 0x3d, 0x54, 0xc9, 0xb8, 0xcf, 0x29, 0x38, 0xf2, 0x5f, 0xf3, 0xd9, + 0x03, 0xa5, 0x9d, 0x9e, 0x20, 0x4a, 0x93, 0x41, 0xb3, 0x32, 0x19, 0x42, 0xdb, 0x93, 0xb0, 0xfe, + 0x0e, 0x69, 0x62, 0x5e, 0xe0, 0x57, 0xad, 0xf9, 0xbc, 0x28, 0xb9, 0x1a, 0x6d, 0x51, 0xf6, 0xc0, + 0x39, 0x16, 0xa3, 0xed, 0x25, 0x34, 0x68, 0xef, 0x8a, 0x3b, 0x0b, 0x6f, 0x84, 0x70, 0xe5, 0x8d, + 0x88, 0x6c, 0x0f, 0x40, 0x7d, 0x0f, 0xa1, 0x20, 0xa5, 0x3e, 0xa7, 0xe4, 0xc4, 0xe7, 0x60, 0x6b, + 0xd9, 0xab, 0x16, 0xc8, 0x21, 0xb7, 0x3f, 0xaa, 0xa8, 0x02, 0x17, 0xd4, 0x6d, 0xb4, 0x1e, 0xc5, + 0x63, 0x7f, 0x18, 0x91, 0xba, 0x62, 0x3c, 0x9e, 0x4c, 0xad, 0x47, 0x70, 0x7d, 0x48, 0x76, 0x64, + 0x42, 0x7f, 0x82, 0x34, 0x96, 0xd0, 0xb8, 0xae, 0x1a, 0x5b, 0x93, 0xa9, 0x55, 0x05, 0x42, 0x37, + 0xa1, 0xb1, 0xbe, 0x8b, 0xd6, 0xfc, 0x80, 0x47, 0x63, 0x5a, 0x2f, 0x19, 
0xff, 0x4f, 0xa6, 0x56, + 0x0d, 0x52, 0x87, 0x00, 0x89, 0x64, 0x30, 0x64, 0x19, 0x25, 0xf5, 0xf2, 0x9d, 0xe4, 0x1b, 0x80, + 0x0c, 0xed, 0xd3, 0x37, 0x53, 0xb9, 0x33, 0xc1, 0x2f, 0x25, 0xb4, 0x09, 0xf9, 0xb7, 0xd1, 0x90, + 0xd3, 0x34, 0xfb, 0xd7, 0x96, 0x55, 0xf4, 0x23, 0x57, 0xa7, 0xb2, 0xea, 0xe7, 0xa1, 0xbd, 0x68, + 0x69, 0xc2, 0x97, 0x76, 0xef, 0x7a, 0x66, 0xaa, 0x37, 0x33, 0x53, 0xfd, 0x39, 0x33, 0xd5, 0xcf, + 0x73, 0x53, 0xb9, 0x99, 0x9b, 0xca, 0x8f, 0xb9, 0xa9, 0xbc, 0x7f, 0x1d, 0x46, 0xfc, 0xf4, 0xbc, + 0xef, 0x04, 0x6c, 0xe4, 0xc2, 0xce, 0xbc, 0x88, 0x29, 0xbf, 0x60, 0xe9, 0xa0, 0x88, 0xfc, 0x24, + 0x72, 0x43, 0xe6, 0xc6, 0x8c, 0xd0, 0x3f, 0x9e, 0x9d, 0xfe, 0x1a, 0xbc, 0x0d, 0xfb, 0xbf, 0x03, + 0x00, 0x00, 0xff, 0xff, 0x39, 0x50, 0x38, 0x67, 0x95, 0x04, 0x00, 0x00, +} + +func (m *OrderID) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrderID) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OrderID) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Order) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Order) MarshalTo(dAtA []byte) (int, error) { + size := 
m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Order) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.CreatedAt != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.CreatedAt)) + i-- + dAtA[i] = 0x20 + } + { + size, err := m.Spec.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOrder(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + if m.State != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.State)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.OrderID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOrder(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *OrderFilters) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *OrderFilters) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *OrderFilters) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.State) > 0 { + i -= len(m.State) + copy(dAtA[i:], m.State) + i = encodeVarintOrder(dAtA, i, uint64(len(m.State))) + i-- + dAtA[i] = 0x2a + } + if m.OSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.OSeq)) + i-- + dAtA[i] = 0x20 + } + if m.GSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.GSeq)) + i-- + dAtA[i] = 0x18 + } + if m.DSeq != 0 { + i = encodeVarintOrder(dAtA, i, uint64(m.DSeq)) + i-- + dAtA[i] = 0x10 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintOrder(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintOrder(dAtA []byte, offset int, v uint64) int { + offset -= sovOrder(v) + base := offset + for v >= 1<<7 { + 
dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *OrderID) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovOrder(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovOrder(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovOrder(uint64(m.OSeq)) + } + return n +} + +func (m *Order) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.OrderID.Size() + n += 1 + l + sovOrder(uint64(l)) + if m.State != 0 { + n += 1 + sovOrder(uint64(m.State)) + } + l = m.Spec.Size() + n += 1 + l + sovOrder(uint64(l)) + if m.CreatedAt != 0 { + n += 1 + sovOrder(uint64(m.CreatedAt)) + } + return n +} + +func (m *OrderFilters) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + if m.DSeq != 0 { + n += 1 + sovOrder(uint64(m.DSeq)) + } + if m.GSeq != 0 { + n += 1 + sovOrder(uint64(m.GSeq)) + } + if m.OSeq != 0 { + n += 1 + sovOrder(uint64(m.OSeq)) + } + l = len(m.State) + if l > 0 { + n += 1 + l + sovOrder(uint64(l)) + } + return n +} + +func sovOrder(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozOrder(x uint64) (n int) { + return sovOrder(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *OrderID) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrderID: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrderID: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Order) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Order: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Order: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.OrderID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + m.State = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.State |= Order_State(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Spec", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Spec.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field CreatedAt", wireType) + } + m.CreatedAt = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.CreatedAt |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *OrderFilters) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: OrderFilters: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: OrderFilters: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DSeq", wireType) + } + m.DSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DSeq |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field GSeq", wireType) + } + m.GSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.GSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OSeq", wireType) + } + m.OSeq = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OSeq |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field State", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOrder + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen 
|= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOrder + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOrder + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.State = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOrder(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOrder + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipOrder(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowOrder + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthOrder + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupOrder + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthOrder + } + if depth == 0 { + return iNdEx, 
nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthOrder = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowOrder = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupOrder = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/params.go b/go/node/market/v1beta5/params.go new file mode 100644 index 00000000..23d4954d --- /dev/null +++ b/go/node/market/v1beta5/params.go @@ -0,0 +1,77 @@ +package v1beta5 + +import ( + "fmt" + + sdk "github.com/cosmos/cosmos-sdk/types" + paramtypes "github.com/cosmos/cosmos-sdk/x/params/types" +) + +var _ paramtypes.ParamSet = (*Params)(nil) + +var ( + DefaultBidMinDeposit = sdk.NewCoin("uakt", sdk.NewInt(500000)) + defaultOrderMaxBids uint32 = 20 + maxOrderMaxBids uint32 = 500 +) + +const ( + keyBidMinDeposit = "BidMinDeposit" + keyOrderMaxBids = "OrderMaxBids" +) + +func ParamKeyTable() paramtypes.KeyTable { + return paramtypes.NewKeyTable().RegisterParamSet(&Params{}) +} + +func (p *Params) ParamSetPairs() paramtypes.ParamSetPairs { + return paramtypes.ParamSetPairs{ + paramtypes.NewParamSetPair([]byte(keyBidMinDeposit), &p.BidMinDeposit, validateCoin), + paramtypes.NewParamSetPair([]byte(keyOrderMaxBids), &p.OrderMaxBids, validateOrderMaxBids), + } +} + +func DefaultParams() Params { + return Params{ + BidMinDeposit: DefaultBidMinDeposit, + OrderMaxBids: defaultOrderMaxBids, + } +} + +func (p Params) Validate() error { + if err := validateCoin(p.BidMinDeposit); err != nil { + return err + } + + if err := validateOrderMaxBids(p.OrderMaxBids); err != nil { + return err + } + return nil +} + +func validateCoin(i interface{}) error { + _, ok := i.(sdk.Coin) + if !ok { + return fmt.Errorf("%w: invalid type %T", ErrInvalidParam, i) + } + + return nil +} + +func validateOrderMaxBids(i interface{}) error { + val, ok := i.(uint32) + + if !ok { + return fmt.Errorf("%w: invalid type %T", ErrInvalidParam, i) + } + + if val == 0 { + return 
fmt.Errorf("%w: order max bids too low", ErrInvalidParam) + } + + if val > maxOrderMaxBids { + return fmt.Errorf("%w: order max bids too high", ErrInvalidParam) + } + + return nil +} diff --git a/go/node/market/v1beta5/params.pb.go b/go/node/market/v1beta5/params.pb.go new file mode 100644 index 00000000..0a9b5549 --- /dev/null +++ b/go/node/market/v1beta5/params.pb.go @@ -0,0 +1,365 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/params.proto + +package v1beta5 + +import ( + fmt "fmt" + types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Params is the params for the x/market module +type Params struct { + BidMinDeposit types.Coin `protobuf:"bytes,1,opt,name=bid_min_deposit,json=bidMinDeposit,proto3" json:"bid_min_deposit" yaml:"bid_min_deposit"` + OrderMaxBids uint32 `protobuf:"varint,2,opt,name=order_max_bids,json=orderMaxBids,proto3" json:"order_max_bids" yaml:"order_max_bids"` +} + +func (m *Params) Reset() { *m = Params{} } +func (m *Params) String() string { return proto.CompactTextString(m) } +func (*Params) ProtoMessage() {} +func (*Params) Descriptor() ([]byte, []int) { + return fileDescriptor_5db3b08f7b20cd98, []int{0} +} +func (m *Params) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Params) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Params.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Params) XXX_Merge(src proto.Message) { + xxx_messageInfo_Params.Merge(m, src) +} +func (m *Params) XXX_Size() int { + return m.Size() +} +func (m *Params) XXX_DiscardUnknown() { + xxx_messageInfo_Params.DiscardUnknown(m) +} + +var xxx_messageInfo_Params proto.InternalMessageInfo + +func (m *Params) GetBidMinDeposit() types.Coin { + if m != nil { + return m.BidMinDeposit + } + return types.Coin{} +} + +func (m *Params) GetOrderMaxBids() uint32 { + if m != nil { + return m.OrderMaxBids + } + return 0 +} + +func init() { + proto.RegisterType((*Params)(nil), "akash.market.v1beta5.Params") +} + +func init() { proto.RegisterFile("akash/market/v1beta5/params.proto", fileDescriptor_5db3b08f7b20cd98) } + +var fileDescriptor_5db3b08f7b20cd98 = []byte{ + // 322 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x5c, 0x91, 0x31, 0x4f, 0xe3, 0x30, + 0x18, 0x86, 0xe3, 0x1b, 
0x3a, 0xe4, 0xda, 0x3b, 0xa9, 0xea, 0x9d, 0x4a, 0x07, 0xbb, 0x64, 0xea, + 0x82, 0xad, 0x82, 0x18, 0x80, 0x2d, 0xb0, 0x56, 0xa0, 0xb2, 0xb1, 0x44, 0x76, 0x6d, 0xa5, 0x56, + 0x71, 0xbe, 0x28, 0x0e, 0x50, 0x7e, 0x00, 0x3b, 0x3f, 0xab, 0x63, 0x47, 0x26, 0x0b, 0xa5, 0x5b, + 0xc7, 0xf2, 0x07, 0x50, 0x93, 0x0c, 0x25, 0x9b, 0xed, 0xf7, 0x79, 0x9f, 0x4f, 0xfa, 0xec, 0x1f, + 0xf3, 0x05, 0xb7, 0x73, 0x66, 0x78, 0xb6, 0x50, 0x39, 0x7b, 0x1e, 0x0b, 0x95, 0xf3, 0x73, 0x96, + 0xf2, 0x8c, 0x1b, 0x4b, 0xd3, 0x0c, 0x72, 0xe8, 0xf6, 0x4a, 0x84, 0x56, 0x08, 0xad, 0x91, 0x41, + 0x2f, 0x86, 0x18, 0x4a, 0x80, 0xed, 0x4f, 0x15, 0x3b, 0xc0, 0x33, 0xb0, 0x06, 0x2c, 0x13, 0xdc, + 0xaa, 0xda, 0x36, 0x66, 0x33, 0xd0, 0x49, 0x95, 0x07, 0x5f, 0xc8, 0x6f, 0xdd, 0x95, 0xf2, 0xee, + 0x1b, 0xf2, 0xff, 0x0a, 0x2d, 0x23, 0xa3, 0x93, 0x48, 0xaa, 0x14, 0xac, 0xce, 0xfb, 0x68, 0x88, + 0x46, 0xbf, 0x4f, 0x8f, 0x68, 0x65, 0xa1, 0x7b, 0x4b, 0x3d, 0x70, 0x4c, 0xaf, 0x41, 0x27, 0x61, + 0xb8, 0x72, 0xc4, 0x2b, 0x1c, 0xe9, 0x84, 0x5a, 0x4e, 0x74, 0x72, 0x53, 0xf5, 0xb6, 0x8e, 0x34, + 0x55, 0x3b, 0x47, 0xfe, 0xbf, 0x72, 0xf3, 0x78, 0x19, 0x34, 0x82, 0x60, 0xda, 0x11, 0x87, 0xdd, + 0x2e, 0xf7, 0xff, 0x40, 0x26, 0x55, 0x16, 0x19, 0xbe, 0x8c, 0x84, 0x96, 0xb6, 0xff, 0x6b, 0x88, + 0x46, 0x9d, 0xf0, 0xaa, 0x70, 0xa4, 0x7d, 0xbb, 0x4f, 0x26, 0x7c, 0x19, 0x6a, 0x69, 0xb7, 0x8e, + 0x34, 0xc8, 0x9d, 0x23, 0xff, 0xaa, 0x21, 0x3f, 0xdf, 0x83, 0x69, 0x1b, 0x0e, 0x8a, 0xe1, 0xfd, + 0xaa, 0xc0, 0x68, 0x5d, 0x60, 0xf4, 0x59, 0x60, 0xf4, 0xbe, 0xc1, 0xde, 0x7a, 0x83, 0xbd, 0x8f, + 0x0d, 0xf6, 0x1e, 0x2e, 0x62, 0x9d, 0xcf, 0x9f, 0x04, 0x9d, 0x81, 0x61, 0xe5, 0x9a, 0x4f, 0x12, + 0x95, 0xbf, 0x40, 0xb6, 0xa8, 0x6f, 0x3c, 0xd5, 0x2c, 0x06, 0x96, 0x80, 0x54, 0x8d, 0x3f, 0x12, + 0xad, 0x72, 0xa3, 0x67, 0xdf, 0x01, 0x00, 0x00, 0xff, 0xff, 0xdf, 0x40, 0x96, 0xab, 0xc2, 0x01, + 0x00, 0x00, +} + +func (m *Params) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Params) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Params) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.OrderMaxBids != 0 { + i = encodeVarintParams(dAtA, i, uint64(m.OrderMaxBids)) + i-- + dAtA[i] = 0x10 + } + { + size, err := m.BidMinDeposit.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintParams(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintParams(dAtA []byte, offset int, v uint64) int { + offset -= sovParams(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Params) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.BidMinDeposit.Size() + n += 1 + l + sovParams(uint64(l)) + if m.OrderMaxBids != 0 { + n += 1 + sovParams(uint64(m.OrderMaxBids)) + } + return n +} + +func sovParams(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozParams(x uint64) (n int) { + return sovParams(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Params) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Params: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Params: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 
1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BidMinDeposit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthParams + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthParams + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.BidMinDeposit.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OrderMaxBids", wireType) + } + m.OrderMaxBids = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowParams + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OrderMaxBids |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipParams(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthParams + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipParams(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + 
if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowParams + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthParams + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupParams + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthParams + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthParams = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowParams = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupParams = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/query.pb.go b/go/node/market/v1beta5/query.pb.go new file mode 100644 index 00000000..fc60e31a --- /dev/null +++ b/go/node/market/v1beta5/query.pb.go @@ -0,0 +1,3035 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/query.proto + +package v1beta5 + +import ( + context "context" + fmt "fmt" + v1beta3 "github.com/akash-network/akash-api/go/node/escrow/v1beta3" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryOrdersRequest is request type for the Query/Orders RPC method +type QueryOrdersRequest struct { + Filters OrderFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrdersRequest) Reset() { *m = QueryOrdersRequest{} } +func (m *QueryOrdersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryOrdersRequest) ProtoMessage() {} +func (*QueryOrdersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{0} +} +func (m *QueryOrdersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrdersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrdersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrdersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrdersRequest.Merge(m, src) +} +func (m *QueryOrdersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryOrdersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrdersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrdersRequest proto.InternalMessageInfo + +func (m *QueryOrdersRequest) GetFilters() OrderFilters { + if m != nil { + return m.Filters + } + return OrderFilters{} +} + +func (m *QueryOrdersRequest) GetPagination() *query.PageRequest { + if m != nil { + return 
m.Pagination + } + return nil +} + +// QueryOrdersResponse is response type for the Query/Orders RPC method +type QueryOrdersResponse struct { + Orders Orders `protobuf:"bytes,1,rep,name=orders,proto3,castrepeated=Orders" json:"orders"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryOrdersResponse) Reset() { *m = QueryOrdersResponse{} } +func (m *QueryOrdersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrdersResponse) ProtoMessage() {} +func (*QueryOrdersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{1} +} +func (m *QueryOrdersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrdersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrdersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrdersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrdersResponse.Merge(m, src) +} +func (m *QueryOrdersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrdersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrdersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrdersResponse proto.InternalMessageInfo + +func (m *QueryOrdersResponse) GetOrders() Orders { + if m != nil { + return m.Orders + } + return nil +} + +func (m *QueryOrdersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryOrderRequest is request type for the Query/Order RPC method +type QueryOrderRequest struct { + ID OrderID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryOrderRequest) Reset() { *m = QueryOrderRequest{} } +func (m *QueryOrderRequest) String() string { return proto.CompactTextString(m) 
} +func (*QueryOrderRequest) ProtoMessage() {} +func (*QueryOrderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{2} +} +func (m *QueryOrderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryOrderRequest.Merge(m, src) +} +func (m *QueryOrderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderRequest proto.InternalMessageInfo + +func (m *QueryOrderRequest) GetID() OrderID { + if m != nil { + return m.ID + } + return OrderID{} +} + +// QueryOrderResponse is response type for the Query/Order RPC method +type QueryOrderResponse struct { + Order Order `protobuf:"bytes,1,opt,name=order,proto3" json:"order"` +} + +func (m *QueryOrderResponse) Reset() { *m = QueryOrderResponse{} } +func (m *QueryOrderResponse) String() string { return proto.CompactTextString(m) } +func (*QueryOrderResponse) ProtoMessage() {} +func (*QueryOrderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{3} +} +func (m *QueryOrderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryOrderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryOrderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryOrderResponse) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_QueryOrderResponse.Merge(m, src) +} +func (m *QueryOrderResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryOrderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryOrderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryOrderResponse proto.InternalMessageInfo + +func (m *QueryOrderResponse) GetOrder() Order { + if m != nil { + return m.Order + } + return Order{} +} + +// QueryBidsRequest is request type for the Query/Bids RPC method +type QueryBidsRequest struct { + Filters BidFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBidsRequest) Reset() { *m = QueryBidsRequest{} } +func (m *QueryBidsRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBidsRequest) ProtoMessage() {} +func (*QueryBidsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{4} +} +func (m *QueryBidsRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidsRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidsRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBidsRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidsRequest.Merge(m, src) +} +func (m *QueryBidsRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBidsRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidsRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidsRequest proto.InternalMessageInfo + +func (m *QueryBidsRequest) GetFilters() BidFilters { + if m != nil { + return m.Filters + } + return BidFilters{} +} + +func (m *QueryBidsRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + 
+// QueryBidsResponse is response type for the Query/Bids RPC method +type QueryBidsResponse struct { + Bids []QueryBidResponse `protobuf:"bytes,1,rep,name=bids,proto3" json:"bids"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryBidsResponse) Reset() { *m = QueryBidsResponse{} } +func (m *QueryBidsResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBidsResponse) ProtoMessage() {} +func (*QueryBidsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{5} +} +func (m *QueryBidsResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidsResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidsResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBidsResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidsResponse.Merge(m, src) +} +func (m *QueryBidsResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBidsResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidsResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidsResponse proto.InternalMessageInfo + +func (m *QueryBidsResponse) GetBids() []QueryBidResponse { + if m != nil { + return m.Bids + } + return nil +} + +func (m *QueryBidsResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryBidRequest is request type for the Query/Bid RPC method +type QueryBidRequest struct { + ID BidID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryBidRequest) Reset() { *m = QueryBidRequest{} } +func (m *QueryBidRequest) String() string { return proto.CompactTextString(m) } +func (*QueryBidRequest) ProtoMessage() {} +func (*QueryBidRequest) Descriptor() ([]byte, 
[]int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{6} +} +func (m *QueryBidRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBidRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidRequest.Merge(m, src) +} +func (m *QueryBidRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryBidRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidRequest proto.InternalMessageInfo + +func (m *QueryBidRequest) GetID() BidID { + if m != nil { + return m.ID + } + return BidID{} +} + +// QueryBidResponse is response type for the Query/Bid RPC method +type QueryBidResponse struct { + Bid Bid `protobuf:"bytes,1,opt,name=bid,proto3" json:"bid"` + EscrowAccount v1beta3.Account `protobuf:"bytes,2,opt,name=escrow_account,json=escrowAccount,proto3" json:"escrow_account"` +} + +func (m *QueryBidResponse) Reset() { *m = QueryBidResponse{} } +func (m *QueryBidResponse) String() string { return proto.CompactTextString(m) } +func (*QueryBidResponse) ProtoMessage() {} +func (*QueryBidResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{7} +} +func (m *QueryBidResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryBidResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryBidResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryBidResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryBidResponse.Merge(m, 
src) +} +func (m *QueryBidResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryBidResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryBidResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryBidResponse proto.InternalMessageInfo + +func (m *QueryBidResponse) GetBid() Bid { + if m != nil { + return m.Bid + } + return Bid{} +} + +func (m *QueryBidResponse) GetEscrowAccount() v1beta3.Account { + if m != nil { + return m.EscrowAccount + } + return v1beta3.Account{} +} + +// QueryLeasesRequest is request type for the Query/Leases RPC method +type QueryLeasesRequest struct { + Filters LeaseFilters `protobuf:"bytes,1,opt,name=filters,proto3" json:"filters"` + Pagination *query.PageRequest `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryLeasesRequest) Reset() { *m = QueryLeasesRequest{} } +func (m *QueryLeasesRequest) String() string { return proto.CompactTextString(m) } +func (*QueryLeasesRequest) ProtoMessage() {} +func (*QueryLeasesRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{8} +} +func (m *QueryLeasesRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeasesRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLeasesRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeasesRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeasesRequest.Merge(m, src) +} +func (m *QueryLeasesRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryLeasesRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeasesRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeasesRequest proto.InternalMessageInfo + +func (m *QueryLeasesRequest) GetFilters() LeaseFilters { + if m != nil { + return m.Filters + } + return LeaseFilters{} +} + 
+func (m *QueryLeasesRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryLeasesResponse is response type for the Query/Leases RPC method +type QueryLeasesResponse struct { + Leases []QueryLeaseResponse `protobuf:"bytes,1,rep,name=leases,proto3" json:"leases"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryLeasesResponse) Reset() { *m = QueryLeasesResponse{} } +func (m *QueryLeasesResponse) String() string { return proto.CompactTextString(m) } +func (*QueryLeasesResponse) ProtoMessage() {} +func (*QueryLeasesResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{9} +} +func (m *QueryLeasesResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeasesResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLeasesResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeasesResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeasesResponse.Merge(m, src) +} +func (m *QueryLeasesResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryLeasesResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeasesResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeasesResponse proto.InternalMessageInfo + +func (m *QueryLeasesResponse) GetLeases() []QueryLeaseResponse { + if m != nil { + return m.Leases + } + return nil +} + +func (m *QueryLeasesResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryLeaseRequest is request type for the Query/Lease RPC method +type QueryLeaseRequest struct { + ID LeaseID `protobuf:"bytes,1,opt,name=id,proto3" json:"id"` +} + +func (m *QueryLeaseRequest) Reset() { *m = 
QueryLeaseRequest{} } +func (m *QueryLeaseRequest) String() string { return proto.CompactTextString(m) } +func (*QueryLeaseRequest) ProtoMessage() {} +func (*QueryLeaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{10} +} +func (m *QueryLeaseRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryLeaseRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeaseRequest.Merge(m, src) +} +func (m *QueryLeaseRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryLeaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeaseRequest proto.InternalMessageInfo + +func (m *QueryLeaseRequest) GetID() LeaseID { + if m != nil { + return m.ID + } + return LeaseID{} +} + +// QueryLeaseResponse is response type for the Query/Lease RPC method +type QueryLeaseResponse struct { + Lease Lease `protobuf:"bytes,1,opt,name=lease,proto3" json:"lease"` + EscrowPayment v1beta3.FractionalPayment `protobuf:"bytes,2,opt,name=escrow_payment,json=escrowPayment,proto3" json:"escrow_payment"` +} + +func (m *QueryLeaseResponse) Reset() { *m = QueryLeaseResponse{} } +func (m *QueryLeaseResponse) String() string { return proto.CompactTextString(m) } +func (*QueryLeaseResponse) ProtoMessage() {} +func (*QueryLeaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_4fc8c96bdc37dc38, []int{11} +} +func (m *QueryLeaseResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryLeaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return 
xxx_messageInfo_QueryLeaseResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryLeaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryLeaseResponse.Merge(m, src) +} +func (m *QueryLeaseResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryLeaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryLeaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryLeaseResponse proto.InternalMessageInfo + +func (m *QueryLeaseResponse) GetLease() Lease { + if m != nil { + return m.Lease + } + return Lease{} +} + +func (m *QueryLeaseResponse) GetEscrowPayment() v1beta3.FractionalPayment { + if m != nil { + return m.EscrowPayment + } + return v1beta3.FractionalPayment{} +} + +func init() { + proto.RegisterType((*QueryOrdersRequest)(nil), "akash.market.v1beta5.QueryOrdersRequest") + proto.RegisterType((*QueryOrdersResponse)(nil), "akash.market.v1beta5.QueryOrdersResponse") + proto.RegisterType((*QueryOrderRequest)(nil), "akash.market.v1beta5.QueryOrderRequest") + proto.RegisterType((*QueryOrderResponse)(nil), "akash.market.v1beta5.QueryOrderResponse") + proto.RegisterType((*QueryBidsRequest)(nil), "akash.market.v1beta5.QueryBidsRequest") + proto.RegisterType((*QueryBidsResponse)(nil), "akash.market.v1beta5.QueryBidsResponse") + proto.RegisterType((*QueryBidRequest)(nil), "akash.market.v1beta5.QueryBidRequest") + proto.RegisterType((*QueryBidResponse)(nil), "akash.market.v1beta5.QueryBidResponse") + proto.RegisterType((*QueryLeasesRequest)(nil), "akash.market.v1beta5.QueryLeasesRequest") + proto.RegisterType((*QueryLeasesResponse)(nil), "akash.market.v1beta5.QueryLeasesResponse") + proto.RegisterType((*QueryLeaseRequest)(nil), "akash.market.v1beta5.QueryLeaseRequest") + proto.RegisterType((*QueryLeaseResponse)(nil), "akash.market.v1beta5.QueryLeaseResponse") +} + +func init() { 
proto.RegisterFile("akash/market/v1beta5/query.proto", fileDescriptor_4fc8c96bdc37dc38) } + +var fileDescriptor_4fc8c96bdc37dc38 = []byte{ + // 803 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xb4, 0x56, 0xcf, 0x4f, 0x13, 0x4d, + 0x18, 0xee, 0x16, 0xe8, 0x97, 0x0c, 0xf9, 0xf8, 0xbe, 0x6f, 0x3e, 0x0e, 0x58, 0xb4, 0x85, 0x1a, + 0x69, 0x31, 0x71, 0x37, 0x40, 0x0c, 0xe1, 0x86, 0x2b, 0xa9, 0x81, 0xf8, 0x03, 0xab, 0x27, 0x2f, + 0x66, 0xda, 0x1d, 0x96, 0x09, 0xed, 0x4e, 0xd9, 0xd9, 0x4a, 0x38, 0x98, 0x18, 0x13, 0x13, 0x8f, + 0x1a, 0xaf, 0xc6, 0x98, 0x90, 0x78, 0xf0, 0xe2, 0xbf, 0xc1, 0x91, 0xc4, 0x8b, 0x27, 0x34, 0xc5, + 0x3f, 0xc4, 0xcc, 0xcc, 0xbb, 0xfd, 0x95, 0xed, 0x6e, 0x49, 0xe0, 0x06, 0xdd, 0xe7, 0x7d, 0xe7, + 0x79, 0x9f, 0xf7, 0x99, 0x67, 0x17, 0xcd, 0x91, 0x3d, 0x22, 0x76, 0xad, 0x06, 0xf1, 0xf7, 0x68, + 0x60, 0xbd, 0x58, 0xaa, 0xd2, 0x80, 0xdc, 0xb6, 0xf6, 0x5b, 0xd4, 0x3f, 0x34, 0x9b, 0x3e, 0x0f, + 0x38, 0x9e, 0x56, 0x08, 0x53, 0x23, 0x4c, 0x40, 0x64, 0xa7, 0x5d, 0xee, 0x72, 0x05, 0xb0, 0xe4, + 0x5f, 0x1a, 0x9b, 0xbd, 0xea, 0x72, 0xee, 0xd6, 0xa9, 0x45, 0x9a, 0xcc, 0x22, 0x9e, 0xc7, 0x03, + 0x12, 0x30, 0xee, 0x09, 0x78, 0x7a, 0xb3, 0xc6, 0x45, 0x83, 0x0b, 0xab, 0x4a, 0x04, 0xd5, 0x47, + 0xc0, 0x81, 0x4b, 0x56, 0x93, 0xb8, 0xcc, 0x53, 0x60, 0xc0, 0x02, 0x2f, 0x2a, 0x6a, 0x3e, 0x3f, + 0x00, 0xd8, 0x8a, 0x15, 0x1c, 0x36, 0xa9, 0xe8, 0x47, 0x0c, 0x30, 0xe7, 0xbe, 0x43, 0x7d, 0x40, + 0xe4, 0x22, 0x11, 0x55, 0xe6, 0xc4, 0x76, 0xa8, 0x53, 0x22, 0xa8, 0x46, 0x14, 0x3e, 0x1b, 0x08, + 0x3f, 0x96, 0x44, 0x1f, 0xc9, 0xb6, 0xa2, 0x42, 0xf7, 0x5b, 0x54, 0x04, 0xd8, 0x46, 0x7f, 0xed, + 0xb0, 0x7a, 0x40, 0x7d, 0x31, 0x63, 0xcc, 0x19, 0xa5, 0xc9, 0xe5, 0x82, 0x19, 0x25, 0x92, 0xa9, + 0xaa, 0xca, 0x1a, 0x69, 0x8f, 0x1f, 0x9f, 0xe6, 0x53, 0x95, 0xb0, 0x10, 0x97, 0x11, 0xea, 0x0e, + 0x3d, 0x93, 0x56, 0x6d, 0x16, 0x4c, 0xad, 0x90, 0x29, 0x15, 0x32, 0xf5, 0x12, 0x40, 0x21, 0x73, + 0x9b, 0xb8, 0x14, 0xce, 0xaf, 0xf4, 0x54, 
0x16, 0x8e, 0x0c, 0xf4, 0x7f, 0x1f, 0x45, 0xd1, 0xe4, + 0x9e, 0xa0, 0xf8, 0x2e, 0xca, 0x28, 0x2d, 0x24, 0xc5, 0xb1, 0xd2, 0xe4, 0xf2, 0x6c, 0x0c, 0x45, + 0x7b, 0x4a, 0x72, 0xfb, 0xfa, 0x33, 0x9f, 0x81, 0x26, 0x50, 0x8a, 0xef, 0x45, 0x90, 0x2c, 0x26, + 0x92, 0xd4, 0x0c, 0xfa, 0x58, 0x3e, 0x44, 0xff, 0x75, 0x49, 0x86, 0x32, 0xae, 0xa1, 0x34, 0x73, + 0x40, 0xc1, 0x6b, 0x31, 0xf4, 0x36, 0x37, 0x6c, 0x24, 0x09, 0xb6, 0x4f, 0xf3, 0xe9, 0xcd, 0x8d, + 0x4a, 0x9a, 0x39, 0x85, 0x07, 0xbd, 0x7b, 0xe9, 0xcc, 0xbc, 0x8a, 0x26, 0x14, 0x71, 0xe8, 0x19, + 0x3b, 0xb2, 0x5e, 0x87, 0xc6, 0x17, 0x3e, 0x1a, 0xe8, 0x5f, 0xd5, 0xcf, 0x66, 0x4e, 0x67, 0xcb, + 0xeb, 0x83, 0x5b, 0x9e, 0x8b, 0xee, 0x67, 0x33, 0xe7, 0x92, 0x77, 0xfc, 0xc9, 0x00, 0xf9, 0x34, + 0x3d, 0x98, 0x76, 0x1d, 0x8d, 0x57, 0x99, 0x13, 0xee, 0x77, 0x21, 0x9a, 0x5c, 0x58, 0x16, 0x56, + 0x01, 0x45, 0x55, 0x79, 0x71, 0xeb, 0xdd, 0x42, 0xff, 0x74, 0x0f, 0xd2, 0xea, 0xad, 0xf6, 0x2c, + 0x77, 0x76, 0xa8, 0x70, 0x11, 0xab, 0x7d, 0xdf, 0xb3, 0x8b, 0xce, 0xac, 0x4b, 0x68, 0xac, 0xda, + 0x69, 0x77, 0x65, 0x68, 0x3b, 0x98, 0x4e, 0x62, 0xf1, 0x16, 0x9a, 0xd2, 0xe9, 0xf1, 0x9c, 0xd4, + 0x6a, 0xbc, 0xe5, 0x05, 0x30, 0x60, 0xe8, 0x34, 0xfd, 0x10, 0xaa, 0x57, 0xcc, 0x3b, 0x1a, 0x04, + 0x1d, 0xfe, 0xd6, 0x4f, 0xe1, 0xc7, 0x6e, 0x0e, 0xdc, 0x97, 0xe1, 0x70, 0xee, 0x1c, 0x50, 0x55, + 0x97, 0xec, 0x91, 0x2f, 0x61, 0x0e, 0x84, 0x14, 0x41, 0xb9, 0x32, 0xca, 0xa8, 0x44, 0x0b, 0x7d, + 0x52, 0x8a, 0xf1, 0x89, 0x2a, 0x1d, 0x70, 0x0a, 0x54, 0x5f, 0x7c, 0x14, 0xc0, 0x61, 0x23, 0x47, + 0x81, 0xc2, 0x47, 0xf8, 0xe5, 0xa8, 0x6f, 0x37, 0xbd, 0x59, 0xa0, 0x98, 0xc7, 0x5b, 0x50, 0xd5, + 0x84, 0x59, 0xa0, 0xf0, 0xf8, 0x69, 0xc7, 0x37, 0x4d, 0x72, 0xd8, 0xa0, 0x1d, 0xdf, 0x14, 0xa3, + 0x7d, 0x53, 0xf6, 0x49, 0x4d, 0xce, 0x45, 0xea, 0xdb, 0x1a, 0xde, 0xef, 0x20, 0xf8, 0x71, 0xf9, + 0x5b, 0x06, 0x4d, 0x28, 0x96, 0xf8, 0xad, 0x81, 0x20, 0x66, 0x71, 0xdc, 0x2e, 0xfa, 0xde, 0x38, + 0xd9, 0xc5, 0x11, 0x90, 0x7a, 0xf0, 0xc2, 0xe2, 0xeb, 0xef, 0xbf, 0x3f, 0xa4, 
0xaf, 0xe3, 0x79, + 0x6b, 0xf8, 0x0b, 0x52, 0x58, 0x75, 0x26, 0x02, 0xfc, 0xc6, 0x40, 0x13, 0xaa, 0x1a, 0x17, 0x93, + 0xfa, 0x87, 0x44, 0x4a, 0xc9, 0xc0, 0x73, 0xf1, 0x60, 0xde, 0x0e, 0xc7, 0xaf, 0x0c, 0x34, 0x2e, + 0xa3, 0x0d, 0x27, 0x84, 0x58, 0x47, 0x8e, 0x62, 0x22, 0x0e, 0x48, 0x14, 0x15, 0x89, 0x79, 0x9c, + 0xb7, 0x86, 0x7d, 0x0b, 0x80, 0x14, 0x2f, 0xd1, 0x98, 0xcd, 0x1c, 0x7c, 0x23, 0x29, 0x45, 0xf5, + 0xf9, 0x23, 0x86, 0xed, 0x48, 0xc7, 0x2b, 0x05, 0xa4, 0x29, 0xf4, 0xc5, 0xc5, 0x89, 0x17, 0x74, + 0x24, 0x53, 0xf4, 0xa7, 0x40, 0xd2, 0x32, 0xf4, 0x1d, 0xef, 0x9a, 0x42, 0x55, 0xc7, 0x9a, 0xa2, + 0xf7, 0xf6, 0x66, 0x47, 0xce, 0x94, 0x11, 0x79, 0x48, 0x49, 0xec, 0x27, 0xc7, 0xed, 0x9c, 0x71, + 0xd2, 0xce, 0x19, 0xbf, 0xda, 0x39, 0xe3, 0xdd, 0x59, 0x2e, 0x75, 0x72, 0x96, 0x4b, 0xfd, 0x38, + 0xcb, 0xa5, 0x9e, 0xad, 0xb9, 0x2c, 0xd8, 0x6d, 0x55, 0xcd, 0x1a, 0x6f, 0xe8, 0x36, 0xb7, 0x3c, + 0x1a, 0x1c, 0x70, 0x7f, 0x0f, 0xfe, 0x93, 0x5f, 0x9f, 0x2e, 0xb7, 0x3c, 0xee, 0xd0, 0x81, 0x03, + 0xaa, 0x19, 0xf5, 0x5d, 0xb7, 0xf2, 0x27, 0x00, 0x00, 0xff, 0xff, 0x82, 0x5d, 0x67, 0x29, 0xf7, + 0x0a, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type QueryClient interface { + // Orders queries orders with filters + Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) + // Order queries order details + Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) + // Bids queries bids with filters + Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) + // Bid queries bid details + Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) + // Leases queries leases with filters + Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) + // Lease queries lease details + Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Orders(ctx context.Context, in *QueryOrdersRequest, opts ...grpc.CallOption) (*QueryOrdersResponse, error) { + out := new(QueryOrdersResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Orders", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Order(ctx context.Context, in *QueryOrderRequest, opts ...grpc.CallOption) (*QueryOrderResponse, error) { + out := new(QueryOrderResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Order", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Bids(ctx context.Context, in *QueryBidsRequest, opts ...grpc.CallOption) (*QueryBidsResponse, error) { + out := new(QueryBidsResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Bids", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Bid(ctx context.Context, in *QueryBidRequest, opts ...grpc.CallOption) (*QueryBidResponse, error) { + out := new(QueryBidResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Bid", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Leases(ctx context.Context, in *QueryLeasesRequest, opts ...grpc.CallOption) (*QueryLeasesResponse, error) { + out := new(QueryLeasesResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Leases", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Lease(ctx context.Context, in *QueryLeaseRequest, opts ...grpc.CallOption) (*QueryLeaseResponse, error) { + out := new(QueryLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Query/Lease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Orders queries orders with filters + Orders(context.Context, *QueryOrdersRequest) (*QueryOrdersResponse, error) + // Order queries order details + Order(context.Context, *QueryOrderRequest) (*QueryOrderResponse, error) + // Bids queries bids with filters + Bids(context.Context, *QueryBidsRequest) (*QueryBidsResponse, error) + // Bid queries bid details + Bid(context.Context, *QueryBidRequest) (*QueryBidResponse, error) + // Leases queries leases with filters + Leases(context.Context, *QueryLeasesRequest) (*QueryLeasesResponse, error) + // Lease queries lease details + Lease(context.Context, *QueryLeaseRequest) (*QueryLeaseResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. 
+type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Orders(ctx context.Context, req *QueryOrdersRequest) (*QueryOrdersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Orders not implemented") +} +func (*UnimplementedQueryServer) Order(ctx context.Context, req *QueryOrderRequest) (*QueryOrderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Order not implemented") +} +func (*UnimplementedQueryServer) Bids(ctx context.Context, req *QueryBidsRequest) (*QueryBidsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Bids not implemented") +} +func (*UnimplementedQueryServer) Bid(ctx context.Context, req *QueryBidRequest) (*QueryBidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Bid not implemented") +} +func (*UnimplementedQueryServer) Leases(ctx context.Context, req *QueryLeasesRequest) (*QueryLeasesResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Leases not implemented") +} +func (*UnimplementedQueryServer) Lease(ctx context.Context, req *QueryLeaseRequest) (*QueryLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Lease not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Orders_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryOrdersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Orders(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Orders", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Orders(ctx, req.(*QueryOrdersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Query_Order_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryOrderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Order(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Order", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Order(ctx, req.(*QueryOrderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Bids_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBidsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Bids(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Bids", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Bids(ctx, req.(*QueryBidsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Bid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryBidRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Bid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Bid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Bid(ctx, req.(*QueryBidRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Leases_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := 
new(QueryLeasesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Leases(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Leases", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Leases(ctx, req.(*QueryLeasesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Query_Lease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryLeaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Lease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Query/Lease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Lease(ctx, req.(*QueryLeaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.market.v1beta5.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Orders", + Handler: _Query_Orders_Handler, + }, + { + MethodName: "Order", + Handler: _Query_Order_Handler, + }, + { + MethodName: "Bids", + Handler: _Query_Bids_Handler, + }, + { + MethodName: "Bid", + Handler: _Query_Bid_Handler, + }, + { + MethodName: "Leases", + Handler: _Query_Leases_Handler, + }, + { + MethodName: "Lease", + Handler: _Query_Lease_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/market/v1beta5/query.proto", +} + +func (m *QueryOrdersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrdersRequest) MarshalTo(dAtA []byte) (int, 
error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrdersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryOrdersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrdersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrdersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Orders) > 0 { + for iNdEx := len(m.Orders) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Orders[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryOrderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryOrderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryOrderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryOrderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Order.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryBidsRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidsRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidsRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + 
+func (m *QueryBidsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidsResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidsResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Bids) > 0 { + for iNdEx := len(m.Bids) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Bids[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryBidRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryBidResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryBidResponse) MarshalTo(dAtA []byte) (int, error) { + 
size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryBidResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.EscrowAccount.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Bid.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryLeasesRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeasesRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeasesRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + { + size, err := m.Filters.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryLeasesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeasesResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeasesResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l 
int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Leases) > 0 { + for iNdEx := len(m.Leases) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Leases[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryLeaseRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeaseRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeaseRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.ID.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *QueryLeaseResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryLeaseResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryLeaseResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.EscrowPayment.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.Lease.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + 
return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryOrdersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrdersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Orders) > 0 { + for _, e := range m.Orders { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryOrderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryOrderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Order.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryBidsRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBidsResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Bids) > 0 { + for _, e := range m.Bids { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryBidRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + 
sovQuery(uint64(l)) + return n +} + +func (m *QueryBidResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Bid.Size() + n += 1 + l + sovQuery(uint64(l)) + l = m.EscrowAccount.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryLeasesRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Filters.Size() + n += 1 + l + sovQuery(uint64(l)) + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryLeasesResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Leases) > 0 { + for _, e := range m.Leases { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryLeaseRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ID.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func (m *QueryLeaseResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Lease.Size() + n += 1 + l + sovQuery(uint64(l)) + l = m.EscrowPayment.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryOrdersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrdersRequest: wiretype end group for non-group") + } + if fieldNum <= 
0 { + return fmt.Errorf("proto: QueryOrdersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrdersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + 
var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrdersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrdersResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Orders", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Orders = append(m.Orders, Order{}) + if err := m.Orders[len(m.Orders)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return 
err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l 
{ + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryOrderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryOrderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryOrderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Order", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Order.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidsRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + 
return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBidsRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidsRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return 
ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBidsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bids", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Bids = append(m.Bids, QueryBidResponse{}) + if err := m.Bids[len(m.Bids)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return 
ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBidRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err 
+ } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryBidResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryBidResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryBidResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Bid", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Bid.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EscrowAccount", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EscrowAccount.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLeasesRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLeasesRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeasesRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := 
m.Filters.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLeasesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLeasesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeasesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Leases", wireType) + } + var msglen int + 
for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Leases = append(m.Leases, QueryLeaseResponse{}) + if err := m.Leases[len(m.Leases)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLeaseRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLeaseRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeaseRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ID.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryLeaseResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryLeaseResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryLeaseResponse: illegal tag 
%d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lease", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Lease.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EscrowPayment", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.EscrowPayment.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/market/v1beta5/query.pb.gw.go b/go/node/market/v1beta5/query.pb.gw.go new file mode 100644 index 00000000..8dda8f4c --- /dev/null +++ b/go/node/market/v1beta5/query.pb.gw.go @@ -0,0 +1,586 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/market/v1beta5/query.proto + +/* +Package v1beta5 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1beta5 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Orders_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrdersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Orders(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Orders_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrdersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Orders_0); err != nil { + return nil, metadata, 
status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Orders(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Order_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Order(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Order_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryOrderRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Order_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Order(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Bids_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidsRequest + var metadata 
runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Bids(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Bids_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidsRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bids_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Bids(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Bid_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Bid_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Bid(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Bid_0(ctx context.Context, 
marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryBidRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Bid_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Bid(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Leases_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeasesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Leases(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Leases_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeasesRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Leases_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) 
+ } + + msg, err := server.Leases(ctx, &protoReq) + return msg, metadata, err + +} + +var ( + filter_Query_Lease_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeaseRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Lease(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Lease_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryLeaseRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Lease_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Lease(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. 
+func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Orders_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Order_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Bids_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Bid_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Leases_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Lease_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Orders_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Orders_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Orders_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Order_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Order_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Order_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Bids_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Bids_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bids_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Bid_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Bid_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Bid_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + mux.Handle("GET", pattern_Query_Leases_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Leases_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Leases_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Lease_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Lease_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Lease_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Orders_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "orders", "list"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Order_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "orders", "info"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Bids_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "bids", "list"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Bid_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "bids", "info"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Leases_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "leases", "list"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Lease_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"akash", "market", "v1beta5", "leases", "info"}, "", 
runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Orders_0 = runtime.ForwardResponseMessage + + forward_Query_Order_0 = runtime.ForwardResponseMessage + + forward_Query_Bids_0 = runtime.ForwardResponseMessage + + forward_Query_Bid_0 = runtime.ForwardResponseMessage + + forward_Query_Leases_0 = runtime.ForwardResponseMessage + + forward_Query_Lease_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/market/v1beta5/service.pb.go b/go/node/market/v1beta5/service.pb.go new file mode 100644 index 00000000..ded31681 --- /dev/null +++ b/go/node/market/v1beta5/service.pb.go @@ -0,0 +1,287 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/market/v1beta5/service.proto + +package v1beta5 + +import ( + context "context" + fmt "fmt" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func init() { + proto.RegisterFile("akash/market/v1beta5/service.proto", fileDescriptor_f1203af46a0757a8) +} + +var fileDescriptor_f1203af46a0757a8 = []byte{ + // 292 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4a, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0xcf, 0x4d, 0x2c, 0xca, 0x4e, 0x2d, 0xd1, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, 0x49, 0x34, + 0xd5, 0x2f, 0x4e, 0x2d, 0x2a, 0xcb, 0x4c, 0x4e, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, + 0x01, 0xab, 0xd1, 0x83, 0xa8, 0xd1, 0x83, 0xaa, 0x91, 0x92, 0xc3, 0xaa, 0x33, 0x29, 0x33, 0x05, + 0xa2, 0x4b, 0x4a, 0x01, 0xab, 0x7c, 0x4e, 0x6a, 0x62, 0x31, 0xd4, 0x5c, 0xa3, 0x17, 0xcc, 0x5c, + 0xcc, 0xbe, 0xc5, 0xe9, 0x42, 0xd1, 0x5c, 0x9c, 0xce, 0x45, 0xa9, 0x89, 0x25, 0xa9, 0x4e, 0x99, + 0x29, 0x42, 0x4a, 0x7a, 0xd8, 0x6c, 0xd3, 0xf3, 0x2d, 0x4e, 0x87, 0xab, 0x91, 0xd2, 0x22, 0xac, + 0x26, 0x28, 0xb5, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x55, 0x28, 0x82, 0x8b, 0xc3, 0x39, 0x27, 0xbf, + 0x18, 0x6c, 0xb6, 0x22, 0x6e, 0x7d, 0x50, 0x25, 0x52, 0x9a, 0x04, 0x95, 0xc0, 0x4d, 0x4e, 0xe7, + 0xe2, 0x0d, 0xcf, 0x2c, 0xc9, 0x48, 0x29, 0x4a, 0x2c, 0xf7, 0x01, 0xf9, 0x4a, 0x48, 0x0d, 0xa7, + 0x5e, 0x14, 0x75, 0x52, 0x7a, 0xc4, 0xa9, 0x83, 0x5b, 0x94, 0xc8, 0xc5, 0x0d, 0xf1, 0x17, 0xc4, + 0x1a, 0x15, 0x02, 0xbe, 0x87, 0x58, 0xa2, 0x43, 0x8c, 0x2a, 0xb8, 0x15, 0x71, 0x5c, 0x5c, 0x60, + 0xff, 0x41, 0x6c, 0x50, 0xc6, 0x1f, 0x08, 0x10, 0x0b, 0xb4, 0x89, 0x50, 0x04, 0x33, 0xdf, 0x29, + 0xf8, 0xc4, 0x23, 0x39, 0xc6, 0x0b, 0x8f, 0xe4, 0x18, 0x1f, 0x3c, 0x92, 0x63, 0x9c, 0xf0, 0x58, + 0x8e, 0xe1, 0xc2, 0x63, 0x39, 0x86, 0x1b, 0x8f, 0xe5, 0x18, 0xa2, 0x2c, 0xd3, 0x33, 0x4b, 0x32, + 0x4a, 0x93, 0xf4, 0x92, 0xf3, 0x73, 0xf5, 0xc1, 0x06, 0xea, 0xe6, 0xa5, 0x96, 0x94, 0xe7, 0x17, + 0x65, 0x43, 0x79, 0x89, 0x05, 0x99, 0xfa, 0xe9, 0xf9, 0xfa, 0x79, 0xf9, 0x29, 0xa9, 0x68, 0x69, + 0x29, 0x89, 0x0d, 0x9c, 0x8c, 0x8c, 0x01, 
0x01, 0x00, 0x00, 0xff, 0xff, 0x5e, 0x38, 0xfb, 0xfb, + 0xc4, 0x02, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // CreateBid defines a method to create a bid given proper inputs. + CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) + // CloseBid defines a method to close a bid given proper inputs. + CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) + // WithdrawLease withdraws accrued funds from the lease payment + WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) + // CreateLease creates a new lease + CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) + // CloseLease defines a method to close an order given proper inputs. + CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateBid(ctx context.Context, in *MsgCreateBid, opts ...grpc.CallOption) (*MsgCreateBidResponse, error) { + out := new(MsgCreateBidResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CreateBid", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseBid(ctx context.Context, in *MsgCloseBid, opts ...grpc.CallOption) (*MsgCloseBidResponse, error) { + out := new(MsgCloseBidResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CloseBid", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) WithdrawLease(ctx context.Context, in *MsgWithdrawLease, opts ...grpc.CallOption) (*MsgWithdrawLeaseResponse, error) { + out := new(MsgWithdrawLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/WithdrawLease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CreateLease(ctx context.Context, in *MsgCreateLease, opts ...grpc.CallOption) (*MsgCreateLeaseResponse, error) { + out := new(MsgCreateLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CreateLease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) CloseLease(ctx context.Context, in *MsgCloseLease, opts ...grpc.CallOption) (*MsgCloseLeaseResponse, error) { + out := new(MsgCloseLeaseResponse) + err := c.cc.Invoke(ctx, "/akash.market.v1beta5.Msg/CloseLease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateBid defines a method to create a bid given proper inputs. + CreateBid(context.Context, *MsgCreateBid) (*MsgCreateBidResponse, error) + // CloseBid defines a method to close a bid given proper inputs. 
+ CloseBid(context.Context, *MsgCloseBid) (*MsgCloseBidResponse, error) + // WithdrawLease withdraws accrued funds from the lease payment + WithdrawLease(context.Context, *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) + // CreateLease creates a new lease + CreateLease(context.Context, *MsgCreateLease) (*MsgCreateLeaseResponse, error) + // CloseLease defines a method to close an order given proper inputs. + CloseLease(context.Context, *MsgCloseLease) (*MsgCloseLeaseResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. +type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateBid(ctx context.Context, req *MsgCreateBid) (*MsgCreateBidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateBid not implemented") +} +func (*UnimplementedMsgServer) CloseBid(ctx context.Context, req *MsgCloseBid) (*MsgCloseBidResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseBid not implemented") +} +func (*UnimplementedMsgServer) WithdrawLease(ctx context.Context, req *MsgWithdrawLease) (*MsgWithdrawLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WithdrawLease not implemented") +} +func (*UnimplementedMsgServer) CreateLease(ctx context.Context, req *MsgCreateLease) (*MsgCreateLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateLease not implemented") +} +func (*UnimplementedMsgServer) CloseLease(ctx context.Context, req *MsgCloseLease) (*MsgCloseLeaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CloseLease not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateBid) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateBid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CreateBid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateBid(ctx, req.(*MsgCreateBid)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseBid_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseBid) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseBid(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CloseBid", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseBid(ctx, req.(*MsgCloseBid)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_WithdrawLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgWithdrawLease) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).WithdrawLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/WithdrawLease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).WithdrawLease(ctx, req.(*MsgWithdrawLease)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CreateLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateLease) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CreateLease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateLease(ctx, req.(*MsgCreateLease)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_CloseLease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCloseLease) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CloseLease(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.market.v1beta5.Msg/CloseLease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CloseLease(ctx, req.(*MsgCloseLease)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.market.v1beta5.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateBid", + Handler: _Msg_CreateBid_Handler, + }, + { + MethodName: "CloseBid", + Handler: _Msg_CloseBid_Handler, + }, + { + MethodName: "WithdrawLease", + Handler: _Msg_WithdrawLease_Handler, + }, + { + MethodName: "CreateLease", + Handler: _Msg_CreateLease_Handler, + }, + { + MethodName: "CloseLease", + Handler: _Msg_CloseLease_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/market/v1beta5/service.proto", +} diff --git a/go/node/market/v1beta5/types.go b/go/node/market/v1beta5/types.go new file mode 100644 index 00000000..e87eb274 --- /dev/null +++ b/go/node/market/v1beta5/types.go @@ -0,0 +1,244 @@ +package v1beta5 + +import ( + "strings" + + "gopkg.in/yaml.v3" + + sdk "github.com/cosmos/cosmos-sdk/types" + + atypes "github.com/akash-network/akash-api/go/node/audit/v1beta4" + attr "github.com/akash-network/akash-api/go/node/types/attributes/v1" +) + +const ( + APIVersion = "v1beta5" +) + +// ID 
method returns OrderID details of specific order +func (o Order) ID() OrderID { + return o.OrderID +} + +// String implements the Stringer interface for a Order object. +func (o Order) String() string { + out, _ := yaml.Marshal(o) + return string(out) +} + +// Orders is a collection of Order +type Orders []Order + +// String implements the Stringer interface for a Orders object. +func (o Orders) String() string { + var out string + for _, order := range o { + out += order.String() + "\n" + } + + return strings.TrimSpace(out) +} + +// ValidateCanBid method validates whether order is open or not and +// returns error if not +func (o Order) ValidateCanBid() error { + switch o.State { + case OrderOpen: + return nil + case OrderActive: + return ErrOrderActive + default: + return ErrOrderClosed + } +} + +// ValidateInactive method validates whether order is open or not and +// returns error if not +func (o Order) ValidateInactive() error { + switch o.State { + case OrderClosed: + return nil + case OrderActive: + return ErrOrderActive + default: + return ErrOrderClosed + } +} + +// Price method returns price of specific order +func (o Order) Price() sdk.DecCoin { + return o.Spec.Price() +} + +// MatchAttributes method compares provided attributes with specific order attributes +func (o Order) MatchAttributes(attrs attr.Attributes) bool { + return o.Spec.MatchAttributes(attrs) +} + +// MatchRequirements method compares provided attributes with specific order attributes +func (o Order) MatchRequirements(prov []atypes.Provider) bool { + return o.Spec.MatchRequirements(prov) +} + +// MatchResourcesRequirements method compares provider capabilities with specific order resources attributes +func (o Order) MatchResourcesRequirements(attr attr.Attributes) bool { + return o.Spec.MatchResourcesRequirements(attr) +} + +// Accept returns whether order filters valid or not +func (filters OrderFilters) Accept(obj Order, stateVal Order_State) bool { + // Checking owner filter + if 
filters.Owner != "" && filters.Owner != obj.OrderID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.OrderID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != obj.OrderID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != obj.OrderID.OSeq { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} + +// ID method returns BidID details of specific bid +func (obj Bid) ID() BidID { + return obj.BidID +} + +// String implements the Stringer interface for a Bid object. +func (obj Bid) String() string { + out, _ := yaml.Marshal(obj) + return string(out) +} + +// Bids is a collection of Bid +type Bids []Bid + +// String implements the Stringer interface for a Bids object. +func (b Bids) String() string { + var out string + for _, bid := range b { + out += bid.String() + "\n" + } + + return strings.TrimSpace(out) +} + +// Accept returns whether bid filters valid or not +func (filters BidFilters) Accept(obj Bid, stateVal Bid_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != obj.BidID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.BidID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != obj.BidID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != obj.BidID.OSeq { + return false + } + + // Checking provider filter + if filters.Provider != "" && filters.Provider != obj.BidID.Provider { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} + +// ID method returns LeaseID details of specific lease +func (obj Lease) ID() LeaseID { + return obj.LeaseID +} + +// String implements the Stringer interface for a Lease 
object. +func (obj Lease) String() string { + out, _ := yaml.Marshal(obj) + return string(out) +} + +// Leases is a collection of Lease +type Leases []Lease + +// String implements the Stringer interface for a Leases object. +func (l Leases) String() string { + var out string + for _, order := range l { + out += order.String() + "\n" + } + + return strings.TrimSpace(out) +} + +// Accept returns whether lease filters valid or not +func (filters LeaseFilters) Accept(obj Lease, stateVal Lease_State) bool { + // Checking owner filter + if filters.Owner != "" && filters.Owner != obj.LeaseID.Owner { + return false + } + + // Checking dseq filter + if filters.DSeq != 0 && filters.DSeq != obj.LeaseID.DSeq { + return false + } + + // Checking gseq filter + if filters.GSeq != 0 && filters.GSeq != obj.LeaseID.GSeq { + return false + } + + // Checking oseq filter + if filters.OSeq != 0 && filters.OSeq != obj.LeaseID.OSeq { + return false + } + + // Checking provider filter + if filters.Provider != "" && filters.Provider != obj.LeaseID.Provider { + return false + } + + // Checking state filter + if stateVal != 0 && stateVal != obj.State { + return false + } + + return true +} + +func (m QueryLeasesResponse) TotalPriceAmount() sdk.Dec { + total := sdk.NewDec(0) + + for _, lease := range m.Leases { + total = total.Add(lease.Lease.Price.Amount) + } + + return total +} diff --git a/go/node/provider/v1beta3/errors.go b/go/node/provider/v1beta3/errors.go index b5b7a5c1..6732bd6d 100644 --- a/go/node/provider/v1beta3/errors.go +++ b/go/node/provider/v1beta3/errors.go @@ -1,42 +1,31 @@ package v1beta3 import ( - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" -) - -const ( - errInvalidProviderURI uint32 = iota + 1 - errNotAbsProviderURI - errProviderNotFound - errProviderExists - errInvalidAddress - errAttributes - errIncompatibleAttributes - errInvalidInfoWebsite + "errors" ) var ( // ErrInvalidProviderURI register error code for invalid provider uri - ErrInvalidProviderURI = 
sdkerrors.Register(ModuleName, errInvalidProviderURI, "invalid provider: invalid host uri") + ErrInvalidProviderURI = errors.New("invalid provider: invalid host uri") // ErrNotAbsProviderURI register error code for not absolute provider uri - ErrNotAbsProviderURI = sdkerrors.Register(ModuleName, errNotAbsProviderURI, "invalid provider: not absolute host uri") + ErrNotAbsProviderURI = errors.New("invalid provider: not absolute host uri") // ErrProviderNotFound provider not found - ErrProviderNotFound = sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") + ErrProviderNotFound = errors.New("invalid provider: address not found") // ErrProviderExists provider already exists - ErrProviderExists = sdkerrors.Register(ModuleName, errProviderExists, "invalid provider: already exists") + ErrProviderExists = errors.New("invalid provider: already exists") // ErrInvalidAddress invalid provider address - ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") + ErrInvalidAddress = errors.New("invalid address") // ErrAttributes error code for provider attribute problems - ErrAttributes = sdkerrors.Register(ModuleName, errAttributes, "attribute specification error") + ErrAttributes = errors.New("attribute specification error") // ErrIncompatibleAttributes error code for attributes update - ErrIncompatibleAttributes = sdkerrors.Register(ModuleName, errIncompatibleAttributes, "attributes cannot be changed") + ErrIncompatibleAttributes = errors.New("attributes cannot be changed") // ErrInvalidInfoWebsite register error code for invalid info website - ErrInvalidInfoWebsite = sdkerrors.Register(ModuleName, errInvalidInfoWebsite, "invalid provider: invalid info website") + ErrInvalidInfoWebsite = errors.New("invalid provider: invalid info website") ) diff --git a/go/node/provider/v1beta4/codec.go b/go/node/provider/v1beta4/codec.go new file mode 100644 index 00000000..0f6c4e0d --- /dev/null +++ 
b/go/node/provider/v1beta4/codec.go @@ -0,0 +1,45 @@ +package v1beta4 + +import ( + "github.com/cosmos/cosmos-sdk/codec" + cdctypes "github.com/cosmos/cosmos-sdk/codec/types" + cryptocodec "github.com/cosmos/cosmos-sdk/crypto/codec" + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/cosmos/cosmos-sdk/types/msgservice" +) + +var ( + amino = codec.NewLegacyAmino() + + // ModuleCdc references the global x/provider module codec. Note, the codec should + // ONLY be used in certain instances of tests and for JSON encoding as Amino is + // still used for that purpose. + // + // The actual codec used for serialization should be provided to x/provider and + // defined at the application level. + ModuleCdc = codec.NewProtoCodec(cdctypes.NewInterfaceRegistry()) +) + +func init() { + RegisterLegacyAminoCodec(amino) + cryptocodec.RegisterCrypto(amino) + amino.Seal() +} + +// RegisterCodec register concrete types on codec +func RegisterLegacyAminoCodec(cdc *codec.LegacyAmino) { + cdc.RegisterConcrete(&MsgCreateProvider{}, ModuleName+"/"+MsgTypeCreateProvider, nil) + cdc.RegisterConcrete(&MsgUpdateProvider{}, ModuleName+"/"+MsgTypeUpdateProvider, nil) + cdc.RegisterConcrete(&MsgDeleteProvider{}, ModuleName+"/"+MsgTypeDeleteProvider, nil) +} + +// RegisterInterfaces registers the x/provider interfaces types with the interface registry +func RegisterInterfaces(registry cdctypes.InterfaceRegistry) { + registry.RegisterImplementations((*sdk.Msg)(nil), + &MsgCreateProvider{}, + &MsgUpdateProvider{}, + &MsgDeleteProvider{}, + ) + + msgservice.RegisterMsgServiceDesc(registry, &_Msg_serviceDesc) +} diff --git a/go/node/provider/v1beta4/errors.go b/go/node/provider/v1beta4/errors.go new file mode 100644 index 00000000..d9231732 --- /dev/null +++ b/go/node/provider/v1beta4/errors.go @@ -0,0 +1,42 @@ +package v1beta4 + +import ( + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + errInvalidProviderURI uint32 = iota + 1 + errNotAbsProviderURI + errProviderNotFound + 
errProviderExists + errInvalidAddress + errAttributes + errIncompatibleAttributes + errInvalidInfoWebsite +) + +var ( + // ErrInvalidProviderURI register error code for invalid provider uri + ErrInvalidProviderURI = sdkerrors.Register(ModuleName, errInvalidProviderURI, "invalid provider: invalid host uri") + + // ErrNotAbsProviderURI register error code for not absolute provider uri + ErrNotAbsProviderURI = sdkerrors.Register(ModuleName, errNotAbsProviderURI, "invalid provider: not absolute host uri") + + // ErrProviderNotFound provider not found + ErrProviderNotFound = sdkerrors.Register(ModuleName, errProviderNotFound, "invalid provider: address not found") + + // ErrProviderExists provider already exists + ErrProviderExists = sdkerrors.Register(ModuleName, errProviderExists, "invalid provider: already exists") + + // ErrInvalidAddress invalid provider address + ErrInvalidAddress = sdkerrors.Register(ModuleName, errInvalidAddress, "invalid address") + + // ErrAttributes error code for provider attribute problems + ErrAttributes = sdkerrors.Register(ModuleName, errAttributes, "attribute specification error") + + // ErrIncompatibleAttributes error code for attributes update + ErrIncompatibleAttributes = sdkerrors.Register(ModuleName, errIncompatibleAttributes, "attributes cannot be changed") + + // ErrInvalidInfoWebsite register error code for invalid info website + ErrInvalidInfoWebsite = sdkerrors.Register(ModuleName, errInvalidInfoWebsite, "invalid provider: invalid info website") +) diff --git a/go/node/provider/v1beta4/event.go b/go/node/provider/v1beta4/event.go new file mode 100644 index 00000000..6a6a46b7 --- /dev/null +++ b/go/node/provider/v1beta4/event.go @@ -0,0 +1,142 @@ +package v1beta4 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + + "github.com/akash-network/akash-api/go/sdkutil" +) + +const ( + EvActionProviderCreated = "provider-created" + EvActionProviderUpdated = "provider-updated" + EvActionProviderDeleted = "provider-deleted" + 
EvOwnerKey = "owner" +) + +// EventProviderCreated struct +type EventProviderCreated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + Owner sdk.AccAddress `json:"owner"` +} + +func NewEventProviderCreated(owner sdk.AccAddress) EventProviderCreated { + return EventProviderCreated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: EvActionProviderCreated, + }, + Owner: owner, + } +} + +// ToSDKEvent method creates new sdk event for EventProviderCreated struct +func (ev EventProviderCreated) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderCreated), + }, ProviderEVAttributes(ev.Owner)...)..., + ) +} + +// EventProviderUpdated struct +type EventProviderUpdated struct { + Context sdkutil.BaseModuleEvent `json:"context"` + Owner sdk.AccAddress `json:"owner"` +} + +func NewEventProviderUpdated(owner sdk.AccAddress) EventProviderUpdated { + return EventProviderUpdated{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: EvActionProviderUpdated, + }, + Owner: owner, + } +} + +// ToSDKEvent method creates new sdk event for EventProviderUpdated struct +func (ev EventProviderUpdated) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderUpdated), + }, ProviderEVAttributes(ev.Owner)...)..., + ) +} + +// EventProviderDeleted struct +type EventProviderDeleted struct { + Context sdkutil.BaseModuleEvent `json:"context"` + Owner sdk.AccAddress `json:"owner"` +} + +func NewEventProviderDeleted(owner sdk.AccAddress) EventProviderDeleted { + return EventProviderDeleted{ + Context: sdkutil.BaseModuleEvent{ + Module: ModuleName, + Action: EvActionProviderDeleted, + }, + Owner: owner, + } +} + +// ToSDKEvent method creates 
new sdk event for EventProviderDeleted struct +func (ev EventProviderDeleted) ToSDKEvent() sdk.Event { + return sdk.NewEvent(sdkutil.EventTypeMessage, + append([]sdk.Attribute{ + sdk.NewAttribute(sdk.AttributeKeyModule, ModuleName), + sdk.NewAttribute(sdk.AttributeKeyAction, EvActionProviderDeleted), + }, ProviderEVAttributes(ev.Owner)...)..., + ) +} + +// ProviderEVAttributes returns event attribues for given Provider +func ProviderEVAttributes(owner sdk.AccAddress) []sdk.Attribute { + return []sdk.Attribute{ + sdk.NewAttribute(EvOwnerKey, owner.String()), + } +} + +// ParseEVProvider returns provider details for given event attributes +func ParseEVProvider(attrs []sdk.Attribute) (sdk.AccAddress, error) { + owner, err := sdkutil.GetAccAddress(attrs, EvOwnerKey) + if err != nil { + return sdk.AccAddress{}, err + } + + return owner, nil +} + +// ParseEvent parses event and returns details of event and error if occurred +// TODO: Enable returning actual events. +func ParseEvent(ev sdkutil.Event) (sdkutil.ModuleEvent, error) { + if ev.Type != sdkutil.EventTypeMessage { + return nil, sdkutil.ErrUnknownType + } + if ev.Module != ModuleName { + return nil, sdkutil.ErrUnknownModule + } + switch ev.Action { + case EvActionProviderCreated: + owner, err := ParseEVProvider(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventProviderCreated(owner), nil + case EvActionProviderUpdated: + owner, err := ParseEVProvider(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventProviderUpdated(owner), nil + case EvActionProviderDeleted: + owner, err := ParseEVProvider(ev.Attributes) + if err != nil { + return nil, err + } + return NewEventProviderDeleted(owner), nil + default: + return nil, sdkutil.ErrUnknownAction + } +} diff --git a/go/node/provider/v1beta4/events_test.go b/go/node/provider/v1beta4/events_test.go new file mode 100644 index 00000000..35ed83a7 --- /dev/null +++ b/go/node/provider/v1beta4/events_test.go @@ -0,0 +1,195 @@ +package 
v1beta4_test + +import ( + "fmt" + "testing" + + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" + + types "github.com/akash-network/akash-api/go/node/provider/v1beta4" + "github.com/akash-network/akash-api/go/sdkutil" + _ "github.com/akash-network/akash-api/go/testutil" +) + +var ( + errWildcard = errors.New("wildcard string error can't be matched") +) + +type testEventParsing struct { + msg sdkutil.Event + expErr error +} + +func (tep testEventParsing) testMessageType() func(t *testing.T) { + _, err := types.ParseEvent(tep.msg) + return func(t *testing.T) { + // if the error expected is errWildcard to catch untyped errors, don't fail the test, the error was expected. + if errors.Is(tep.expErr, errWildcard) { + require.Error(t, err) + } else { + require.Equal(t, tep.expErr, err) + } + } +} + +var TEPS = []testEventParsing{ + { + msg: sdkutil.Event{ + Type: "nil", + }, + expErr: sdkutil.ErrUnknownType, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + }, + expErr: sdkutil.ErrUnknownModule, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + }, + expErr: sdkutil.ErrUnknownAction, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: "nil", + }, + expErr: sdkutil.ErrUnknownModule, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: "nil", + }, + expErr: sdkutil.ErrUnknownAction, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderCreated, + Attributes: []sdk.Attribute{ + { + Key: types.EvOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderCreated, + Attributes: []sdk.Attribute{ + { + Key: types.EvOwnerKey, + Value: "hello", + }, + }, + 
}, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderCreated, + Attributes: []sdk.Attribute{}, + }, + expErr: errWildcard, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderUpdated, + Attributes: []sdk.Attribute{ + { + Key: types.EvOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderUpdated, + Attributes: []sdk.Attribute{ + { + Key: types.EvOwnerKey, + Value: "hello", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderUpdated, + Attributes: []sdk.Attribute{}, + }, + expErr: errWildcard, + }, + + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderDeleted, + Attributes: []sdk.Attribute{ + { + Key: types.EvOwnerKey, + Value: "akash1qtqpdszzakz7ugkey7ka2cmss95z26ygar2mgr", + }, + }, + }, + expErr: nil, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderDeleted, + Attributes: []sdk.Attribute{ + { + Key: types.EvOwnerKey, + Value: "hello", + }, + }, + }, + expErr: errWildcard, + }, + { + msg: sdkutil.Event{ + Type: sdkutil.EventTypeMessage, + Module: types.ModuleName, + Action: types.EvActionProviderDeleted, + Attributes: []sdk.Attribute{}, + }, + expErr: errWildcard, + }, +} + +func TestEventParsing(t *testing.T) { + for i, test := range TEPS { + t.Run(fmt.Sprintf("%d", i), + test.testMessageType()) + } +} diff --git a/go/node/provider/v1beta4/genesis.pb.go b/go/node/provider/v1beta4/genesis.pb.go new file mode 100644 index 00000000..f56eb97a --- /dev/null +++ 
b/go/node/provider/v1beta4/genesis.pb.go @@ -0,0 +1,334 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/provider/v1beta4/genesis.proto + +package v1beta4 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GenesisState defines the basic genesis state used by provider module +type GenesisState struct { + Providers []Provider `protobuf:"bytes,1,rep,name=providers,proto3" json:"providers" yaml:"providers"` +} + +func (m *GenesisState) Reset() { *m = GenesisState{} } +func (m *GenesisState) String() string { return proto.CompactTextString(m) } +func (*GenesisState) ProtoMessage() {} +func (*GenesisState) Descriptor() ([]byte, []int) { + return fileDescriptor_045bfc9d93eb382e, []int{0} +} +func (m *GenesisState) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GenesisState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GenesisState.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GenesisState) XXX_Merge(src proto.Message) { + xxx_messageInfo_GenesisState.Merge(m, src) +} +func (m *GenesisState) XXX_Size() int { + return m.Size() +} +func (m *GenesisState) XXX_DiscardUnknown() { + xxx_messageInfo_GenesisState.DiscardUnknown(m) +} + +var xxx_messageInfo_GenesisState 
proto.InternalMessageInfo + +func (m *GenesisState) GetProviders() []Provider { + if m != nil { + return m.Providers + } + return nil +} + +func init() { + proto.RegisterType((*GenesisState)(nil), "akash.provider.v1beta4.GenesisState") +} + +func init() { + proto.RegisterFile("akash/provider/v1beta4/genesis.proto", fileDescriptor_045bfc9d93eb382e) +} + +var fileDescriptor_045bfc9d93eb382e = []byte{ + // 226 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x2f, 0x28, 0xca, 0x2f, 0xcb, 0x4c, 0x49, 0x2d, 0xd2, 0x2f, 0x33, 0x4c, 0x4a, 0x2d, + 0x49, 0x34, 0xd1, 0x4f, 0x4f, 0xcd, 0x4b, 0x2d, 0xce, 0x2c, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x12, 0x03, 0xab, 0xd2, 0x83, 0xa9, 0xd2, 0x83, 0xaa, 0x92, 0x12, 0x49, 0xcf, 0x4f, 0xcf, + 0x07, 0x2b, 0xd1, 0x07, 0xb1, 0x20, 0xaa, 0xa5, 0x54, 0x71, 0x98, 0x09, 0xd7, 0x0e, 0x56, 0xa6, + 0x54, 0xca, 0xc5, 0xe3, 0x0e, 0xb1, 0x25, 0xb8, 0x24, 0xb1, 0x24, 0x55, 0x28, 0x95, 0x8b, 0x13, + 0xa6, 0xa2, 0x58, 0x82, 0x51, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x41, 0x0f, 0xbb, 0xc5, 0x7a, 0x01, + 0x50, 0x01, 0x27, 0xd5, 0x13, 0xf7, 0xe4, 0x19, 0x5e, 0xdd, 0x93, 0x47, 0x68, 0xfd, 0x74, 0x4f, + 0x5e, 0xa0, 0x32, 0x31, 0x37, 0xc7, 0x4a, 0x09, 0x2e, 0xa4, 0x14, 0x84, 0x90, 0x76, 0x0a, 0x3d, + 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, + 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, 0x86, 0x28, 0xeb, 0xf4, 0xcc, 0x92, 0x8c, 0xd2, + 0x24, 0xbd, 0xe4, 0xfc, 0x5c, 0x7d, 0xb0, 0xbd, 0xba, 0x79, 0xa9, 0x25, 0xe5, 0xf9, 0x45, 0xd9, + 0x50, 0x5e, 0x62, 0x41, 0xa6, 0x7e, 0x7a, 0xbe, 0x7e, 0x5e, 0x7e, 0x4a, 0x2a, 0x86, 0xe7, 0x92, + 0xd8, 0xc0, 0x9e, 0x32, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x45, 0x82, 0x89, 0xec, 0x51, 0x01, + 0x00, 0x00, +} + +func (m *GenesisState) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + 
if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GenesisState) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GenesisState) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenesis(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func encodeVarintGenesis(dAtA []byte, offset int, v uint64) int { + offset -= sovGenesis(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GenesisState) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovGenesis(uint64(l)) + } + } + return n +} + +func sovGenesis(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenesis(x uint64) (n int) { + return sovGenesis(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GenesisState) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GenesisState: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GenesisState: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenesis + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenesis + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenesis + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, Provider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenesis(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenesis + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenesis(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenesis + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if 
length < 0 { + return 0, ErrInvalidLengthGenesis + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenesis + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenesis + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenesis = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenesis = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenesis = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/key.go b/go/node/provider/v1beta4/key.go new file mode 100644 index 00000000..a254c07e --- /dev/null +++ b/go/node/provider/v1beta4/key.go @@ -0,0 +1,12 @@ +package v1beta4 + +const ( + // ModuleName is the module name constant used in many places + ModuleName = "provider" + + // StoreKey is the store key string for provider + StoreKey = ModuleName + + // RouterKey is the message route for provider + RouterKey = ModuleName +) diff --git a/go/node/provider/v1beta4/migrate/v1beta3.go b/go/node/provider/v1beta4/migrate/v1beta3.go new file mode 100644 index 00000000..040c4c06 --- /dev/null +++ b/go/node/provider/v1beta4/migrate/v1beta3.go @@ -0,0 +1,23 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/provider/v1beta3" + "github.com/akash-network/akash-api/go/node/provider/v1beta4" + amigrate "github.com/akash-network/akash-api/go/node/types/attributes/v1/migrate" +) + +func ProviderFromV1beta3(from v1beta3.Provider) v1beta4.Provider { + return v1beta4.Provider{ + Owner: from.Owner, + HostURI: from.HostURI, + Attributes: amigrate.AttributesFromV1Beta3(from.Attributes), + Info: v1beta4.ProviderInfo{}, + } +} + +func ProviderInfoFromV1beta3(from v1beta3.ProviderInfo) v1beta4.ProviderInfo { + return v1beta4.ProviderInfo{ + EMail: from.EMail, + Website: 
from.Website, + } +} diff --git a/go/node/provider/v1beta4/msgs.go b/go/node/provider/v1beta4/msgs.go new file mode 100644 index 00000000..fa799e9d --- /dev/null +++ b/go/node/provider/v1beta4/msgs.go @@ -0,0 +1,177 @@ +package v1beta4 + +import ( + "net/url" + "regexp" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + "github.com/pkg/errors" + + attr "github.com/akash-network/akash-api/go/node/types/attributes/v1" +) + +const ( + MsgTypeCreateProvider = "create-provider" + MsgTypeUpdateProvider = "update-provider" + MsgTypeDeleteProvider = "delete-provider" +) + +var ( + _, _, _ sdk.Msg = &MsgCreateProvider{}, &MsgUpdateProvider{}, &MsgDeleteProvider{} + attributeNameRegexp = regexp.MustCompile(attr.AttributeNameRegexpString) +) + +// NewMsgCreateProvider creates a new MsgCreateProvider instance +func NewMsgCreateProvider(owner sdk.AccAddress, hostURI string, attributes attr.Attributes) *MsgCreateProvider { + return &MsgCreateProvider{ + Owner: owner.String(), + HostURI: hostURI, + Attributes: attributes, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgCreateProvider) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgCreateProvider) Type() string { return MsgTypeCreateProvider } + +// ValidateBasic does basic validation of a HostURI +func (msg MsgCreateProvider) ValidateBasic() error { + if err := validateProviderURI(msg.HostURI); err != nil { + return err + } + if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgCreate: Invalid Provider Address") + } + if err := msg.Attributes.ValidateWithRegex(attributeNameRegexp); err != nil { + return err + } + if err := msg.Info.Validate(); err != nil { + return err + } + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgCreateProvider) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) 
+} + +// GetSigners defines whose signature is required +func (msg MsgCreateProvider) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgUpdateProvider creates a new MsgUpdateProvider instance +func NewMsgUpdateProvider(owner sdk.AccAddress, hostURI string, attributes attr.Attributes) *MsgUpdateProvider { + return &MsgUpdateProvider{ + Owner: owner.String(), + HostURI: hostURI, + Attributes: attributes, + } +} + +// Route implements the sdk.Msg interface +func (msg MsgUpdateProvider) Route() string { return RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgUpdateProvider) Type() string { return MsgTypeUpdateProvider } + +// ValidateBasic does basic validation of a ProviderURI +func (msg MsgUpdateProvider) ValidateBasic() error { + if err := validateProviderURI(msg.HostURI); err != nil { + return err + } + if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgUpdate: Invalid Provider Address") + } + if err := msg.Attributes.ValidateWithRegex(attributeNameRegexp); err != nil { + return err + } + if err := msg.Info.Validate(); err != nil { + return err + } + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgUpdateProvider) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgUpdateProvider) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +// NewMsgDeleteProvider creates a new MsgDeleteProvider instance +func NewMsgDeleteProvider(owner sdk.AccAddress) *MsgDeleteProvider { + return &MsgDeleteProvider{ + Owner: owner.String(), + } +} + +// Route implements the sdk.Msg interface +func (msg MsgDeleteProvider) Route() string { return 
RouterKey } + +// Type implements the sdk.Msg interface +func (msg MsgDeleteProvider) Type() string { return MsgTypeDeleteProvider } + +// ValidateBasic does basic validation +func (msg MsgDeleteProvider) ValidateBasic() error { + if _, err := sdk.AccAddressFromBech32(msg.Owner); err != nil { + return sdkerrors.Wrap(sdkerrors.ErrInvalidAddress, "MsgDelete: Invalid Provider Address") + } + return nil +} + +// GetSignBytes encodes the message for signing +func (msg MsgDeleteProvider) GetSignBytes() []byte { + return sdk.MustSortJSON(ModuleCdc.MustMarshalJSON(&msg)) +} + +// GetSigners defines whose signature is required +func (msg MsgDeleteProvider) GetSigners() []sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(msg.Owner) + if err != nil { + panic(err) + } + + return []sdk.AccAddress{owner} +} + +func validateProviderURI(val string) error { + u, err := url.Parse(val) + if err != nil { + return ErrInvalidProviderURI + } + if !u.IsAbs() { + return errors.Wrapf(ErrNotAbsProviderURI, "validating %q for absolute URI", val) + } + + if u.Scheme != "https" { + return errors.Wrapf(ErrInvalidProviderURI, "scheme in %q should be https", val) + } + + if u.Host == "" { + return errors.Wrapf(ErrInvalidProviderURI, "validating %q for valid host", val) + } + + if u.Path != "" { + return errors.Wrapf(ErrInvalidProviderURI, "path in %q should be empty", val) + } + + return nil +} diff --git a/go/node/provider/v1beta4/msgs_test.go b/go/node/provider/v1beta4/msgs_test.go new file mode 100644 index 00000000..61ea9139 --- /dev/null +++ b/go/node/provider/v1beta4/msgs_test.go @@ -0,0 +1,255 @@ +package v1beta4 + +import ( + "fmt" + "net/url" + "testing" + + "github.com/pkg/errors" + + sdk "github.com/cosmos/cosmos-sdk/types" + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" + + attr "github.com/akash-network/akash-api/go/node/types/attributes/v1" +) + +func TestConfigPath(t *testing.T) { + type testConfigPath struct { + path string + expErr error + } + tests := 
[]testConfigPath{ + { + path: "foo.yaml", + expErr: ErrNotAbsProviderURI, + }, + { + path: "localhost", + expErr: ErrNotAbsProviderURI, + }, + { + path: "localhost/foo", + expErr: ErrNotAbsProviderURI, + }, + { + path: "localhost:80", + expErr: ErrInvalidProviderURI, + }, + { + path: "localhost:80/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "127.0.0.1", + expErr: ErrNotAbsProviderURI, + }, + { + path: "127.0.0.1/foo", + expErr: ErrNotAbsProviderURI, + }, + { + path: "127.0.0.1:80", + expErr: ErrInvalidProviderURI, + }, + { + path: "127.0.0.1:80/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "file:///foo.yaml", + expErr: ErrInvalidProviderURI, + }, + { + path: "https://localhost", + expErr: nil, + }, + { + path: "http://localhost/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "https://localhost:80", + expErr: nil, + }, + { + path: "http://localhost:80/foo", + expErr: ErrInvalidProviderURI, + }, + { + path: "http://localhost:3001/", + expErr: ErrInvalidProviderURI, + }, + { + path: "https://localhost:80", + expErr: nil, + }, + { + path: "https://localhost:80/foo", + expErr: ErrInvalidProviderURI, + }, + } + + for i, testUnit := range tests { + closure := func(test testConfigPath) func(t *testing.T) { + testFunc := func(t *testing.T) { + err := validateProviderURI(test.path) + if test.expErr != nil && !errors.Is(err, test.expErr) || + err != nil && test.expErr == nil { + t.Errorf("unexpected error occurred: %v", err) + + _, err := url.Parse(test.path) + if err != nil { + t.Errorf("url.Parse() of %q err: %v", test.path, err) + } + } + } + return testFunc + } + tf := closure(testUnit) + t.Run(fmt.Sprintf("%d->%q", i, testUnit.path), tf) + } +} + +type providerTestParams struct { + msg Provider + expErr error + delErr error +} + +func (test providerTestParams) testCreate() func(t *testing.T) { + msg := MsgCreateProvider{ + Owner: test.msg.Owner, + HostURI: test.msg.HostURI, + Attributes: test.msg.Attributes, + } + vErr := msg.ValidateBasic() 
+ return func(t *testing.T) { + if test.expErr != nil && !errors.Is(vErr, test.expErr) { + t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) + return + } + sb := msg.GetSignBytes() + if len(sb) == 0 { + t.Error("no signed bytes returned") + } + } +} + +func (test providerTestParams) testUpdate() func(t *testing.T) { + msg := MsgUpdateProvider{ + Owner: test.msg.Owner, + HostURI: test.msg.HostURI, + Attributes: test.msg.Attributes, + } + vErr := msg.ValidateBasic() + return func(t *testing.T) { + if test.expErr != nil && !errors.Is(vErr, test.expErr) { + t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) + return + } + sb := msg.GetSignBytes() + if len(sb) == 0 { + t.Error("no signed bytes returned") + } + } +} + +func (test providerTestParams) testDelete() func(t *testing.T) { + msg := MsgDeleteProvider{ + Owner: test.msg.Owner, + } + vErr := msg.ValidateBasic() + return func(t *testing.T) { + if test.delErr != nil && !errors.Is(vErr, test.delErr) { + t.Errorf("error expected: '%v' VS: %v", test.expErr, vErr) + return + } + sb := msg.GetSignBytes() + if len(sb) == 0 { + t.Error("no signed bytes returned") + } + } +} + +var msgCreateTests = []providerTestParams{ + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "https://localhost:3001", + Attributes: attr.Attributes{ + { + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: nil, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "https://localhost:3001", + Attributes: attr.Attributes{ + { + Key: "hihi*", + Value: "neh", + }, + }, + }, + expErr: types.ErrInvalidAttributeKey, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("").String(), + HostURI: "https://localhost:3001", + Attributes: attr.Attributes{ + { + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: sdkerrors.ErrInvalidAddress, + delErr: sdkerrors.ErrInvalidAddress, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "ht tp://foo.com", + Attributes: 
attr.Attributes{ + { + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: ErrInvalidProviderURI, + }, + { + msg: Provider{ + Owner: sdk.AccAddress("hihi").String(), + HostURI: "", + Attributes: attr.Attributes{ + { + Key: "hihi", + Value: "neh", + }, + }, + }, + expErr: ErrNotAbsProviderURI, + }, +} + +func TestMsgStarValidation(t *testing.T) { + for i, test := range msgCreateTests { + main := func(test providerTestParams) func(t *testing.T) { + return func(t *testing.T) { + t.Run("msg-create", test.testCreate()) + t.Run("msg-update", test.testUpdate()) + t.Run("msg-delete", test.testDelete()) + } + } + f := main(test) + t.Run(fmt.Sprintf("%d", i), f) + } +} diff --git a/go/node/provider/v1beta4/provider.pb.go b/go/node/provider/v1beta4/provider.pb.go new file mode 100644 index 00000000..b8e01aa7 --- /dev/null +++ b/go/node/provider/v1beta4/provider.pb.go @@ -0,0 +1,2102 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/provider/v1beta4/provider.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + github_com_akash_network_akash_api_go_node_types_attributes_v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// ProviderInfo +type ProviderInfo struct { + EMail string `protobuf:"bytes,1,opt,name=email,proto3" json:"email" yaml:"email"` + Website string `protobuf:"bytes,2,opt,name=website,proto3" json:"website" yaml:"website"` +} + +func (m *ProviderInfo) Reset() { *m = ProviderInfo{} } +func (m *ProviderInfo) String() string { return proto.CompactTextString(m) } +func (*ProviderInfo) ProtoMessage() {} +func (*ProviderInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{0} +} +func (m *ProviderInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProviderInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProviderInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ProviderInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProviderInfo.Merge(m, src) +} +func (m *ProviderInfo) XXX_Size() int { + return m.Size() +} +func (m *ProviderInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ProviderInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ProviderInfo proto.InternalMessageInfo + +func (m *ProviderInfo) GetEMail() string { + if m != nil { + return m.EMail + } + return "" +} + +func (m *ProviderInfo) GetWebsite() string { + if m != nil { + return m.Website + } + return "" +} + +// MsgCreateProvider defines an SDK message for creating a provider +type MsgCreateProvider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes 
`protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` + Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` +} + +func (m *MsgCreateProvider) Reset() { *m = MsgCreateProvider{} } +func (m *MsgCreateProvider) String() string { return proto.CompactTextString(m) } +func (*MsgCreateProvider) ProtoMessage() {} +func (*MsgCreateProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{1} +} +func (m *MsgCreateProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateProvider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateProvider.Merge(m, src) +} +func (m *MsgCreateProvider) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateProvider) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateProvider proto.InternalMessageInfo + +func (m *MsgCreateProvider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgCreateProvider) GetHostURI() string { + if m != nil { + return m.HostURI + } + return "" +} + +func (m *MsgCreateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *MsgCreateProvider) GetInfo() ProviderInfo { + if m != nil { + return m.Info + } + return ProviderInfo{} +} + +// MsgCreateProviderResponse defines the Msg/CreateProvider response type. 
+type MsgCreateProviderResponse struct { +} + +func (m *MsgCreateProviderResponse) Reset() { *m = MsgCreateProviderResponse{} } +func (m *MsgCreateProviderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgCreateProviderResponse) ProtoMessage() {} +func (*MsgCreateProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{2} +} +func (m *MsgCreateProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgCreateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgCreateProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgCreateProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgCreateProviderResponse.Merge(m, src) +} +func (m *MsgCreateProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgCreateProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgCreateProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgCreateProviderResponse proto.InternalMessageInfo + +// MsgUpdateProvider defines an SDK message for updating a provider +type MsgUpdateProvider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` + Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` +} + +func (m *MsgUpdateProvider) Reset() { *m = MsgUpdateProvider{} } +func (m *MsgUpdateProvider) String() string { return 
proto.CompactTextString(m) } +func (*MsgUpdateProvider) ProtoMessage() {} +func (*MsgUpdateProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{3} +} +func (m *MsgUpdateProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateProvider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateProvider.Merge(m, src) +} +func (m *MsgUpdateProvider) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateProvider) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateProvider proto.InternalMessageInfo + +func (m *MsgUpdateProvider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *MsgUpdateProvider) GetHostURI() string { + if m != nil { + return m.HostURI + } + return "" +} + +func (m *MsgUpdateProvider) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *MsgUpdateProvider) GetInfo() ProviderInfo { + if m != nil { + return m.Info + } + return ProviderInfo{} +} + +// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. 
+type MsgUpdateProviderResponse struct { +} + +func (m *MsgUpdateProviderResponse) Reset() { *m = MsgUpdateProviderResponse{} } +func (m *MsgUpdateProviderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgUpdateProviderResponse) ProtoMessage() {} +func (*MsgUpdateProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{4} +} +func (m *MsgUpdateProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgUpdateProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgUpdateProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgUpdateProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgUpdateProviderResponse.Merge(m, src) +} +func (m *MsgUpdateProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgUpdateProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgUpdateProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgUpdateProviderResponse proto.InternalMessageInfo + +// MsgDeleteProvider defines an SDK message for deleting a provider +type MsgDeleteProvider struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` +} + +func (m *MsgDeleteProvider) Reset() { *m = MsgDeleteProvider{} } +func (m *MsgDeleteProvider) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProvider) ProtoMessage() {} +func (*MsgDeleteProvider) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{5} +} +func (m *MsgDeleteProvider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProvider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProvider.Marshal(b, m, deterministic) + } 
else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProvider) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProvider.Merge(m, src) +} +func (m *MsgDeleteProvider) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProvider) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProvider.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProvider proto.InternalMessageInfo + +func (m *MsgDeleteProvider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. +type MsgDeleteProviderResponse struct { +} + +func (m *MsgDeleteProviderResponse) Reset() { *m = MsgDeleteProviderResponse{} } +func (m *MsgDeleteProviderResponse) String() string { return proto.CompactTextString(m) } +func (*MsgDeleteProviderResponse) ProtoMessage() {} +func (*MsgDeleteProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{6} +} +func (m *MsgDeleteProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MsgDeleteProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_MsgDeleteProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *MsgDeleteProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_MsgDeleteProviderResponse.Merge(m, src) +} +func (m *MsgDeleteProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *MsgDeleteProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_MsgDeleteProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_MsgDeleteProviderResponse proto.InternalMessageInfo + +// Provider stores owner and host details +type Provider struct { + Owner string 
`protobuf:"bytes,1,opt,name=owner,proto3" json:"owner" yaml:"owner"` + HostURI string `protobuf:"bytes,2,opt,name=host_uri,json=hostUri,proto3" json:"host_uri" yaml:"host_uri"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes" yaml:"attributes"` + Info ProviderInfo `protobuf:"bytes,4,opt,name=info,proto3" json:"info" yaml:"info"` +} + +func (m *Provider) Reset() { *m = Provider{} } +func (*Provider) ProtoMessage() {} +func (*Provider) Descriptor() ([]byte, []int) { + return fileDescriptor_cbb1622664c70e47, []int{7} +} +func (m *Provider) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Provider) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Provider.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Provider) XXX_Merge(src proto.Message) { + xxx_messageInfo_Provider.Merge(m, src) +} +func (m *Provider) XXX_Size() int { + return m.Size() +} +func (m *Provider) XXX_DiscardUnknown() { + xxx_messageInfo_Provider.DiscardUnknown(m) +} + +var xxx_messageInfo_Provider proto.InternalMessageInfo + +func (m *Provider) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +func (m *Provider) GetHostURI() string { + if m != nil { + return m.HostURI + } + return "" +} + +func (m *Provider) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func (m *Provider) GetInfo() ProviderInfo { + if m != nil { + return m.Info + } + return ProviderInfo{} +} + +func init() { + proto.RegisterType((*ProviderInfo)(nil), "akash.provider.v1beta4.ProviderInfo") + 
proto.RegisterType((*MsgCreateProvider)(nil), "akash.provider.v1beta4.MsgCreateProvider") + proto.RegisterType((*MsgCreateProviderResponse)(nil), "akash.provider.v1beta4.MsgCreateProviderResponse") + proto.RegisterType((*MsgUpdateProvider)(nil), "akash.provider.v1beta4.MsgUpdateProvider") + proto.RegisterType((*MsgUpdateProviderResponse)(nil), "akash.provider.v1beta4.MsgUpdateProviderResponse") + proto.RegisterType((*MsgDeleteProvider)(nil), "akash.provider.v1beta4.MsgDeleteProvider") + proto.RegisterType((*MsgDeleteProviderResponse)(nil), "akash.provider.v1beta4.MsgDeleteProviderResponse") + proto.RegisterType((*Provider)(nil), "akash.provider.v1beta4.Provider") +} + +func init() { + proto.RegisterFile("akash/provider/v1beta4/provider.proto", fileDescriptor_cbb1622664c70e47) +} + +var fileDescriptor_cbb1622664c70e47 = []byte{ + // 586 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x4f, 0x8b, 0xd3, 0x4e, + 0x18, 0x4e, 0xb6, 0xed, 0xaf, 0xfd, 0x4d, 0x97, 0x95, 0x0d, 0x22, 0xdd, 0x2e, 0x66, 0x4a, 0x54, + 0xa8, 0x82, 0x09, 0xad, 0x82, 0xb2, 0x9e, 0xac, 0x2e, 0x58, 0xa1, 0x20, 0x81, 0x5e, 0xbc, 0x48, + 0x62, 0x67, 0xd3, 0xb0, 0x6d, 0x26, 0x64, 0xa6, 0x2d, 0x7b, 0xf4, 0x1b, 0x78, 0xf4, 0xe6, 0x1e, + 0x3c, 0xf9, 0x0d, 0xfc, 0x06, 0x7b, 0xdc, 0x8b, 0xe0, 0x69, 0x94, 0xf6, 0x22, 0x3d, 0xf6, 0x13, + 0x48, 0x66, 0xf2, 0xa7, 0xe9, 0xd6, 0xa2, 0x78, 0xd9, 0xc3, 0xde, 0xf2, 0x3e, 0xf3, 0xbc, 0xef, + 0xf3, 0x32, 0xcf, 0xc3, 0xb4, 0xe0, 0x8e, 0x75, 0x6c, 0x91, 0xbe, 0xe1, 0x07, 0x78, 0xec, 0xf6, + 0x50, 0x60, 0x8c, 0x1b, 0x36, 0xa2, 0xd6, 0xc3, 0x04, 0xd0, 0xfd, 0x00, 0x53, 0xac, 0xdc, 0xe0, + 0x34, 0x3d, 0x41, 0x23, 0x5a, 0xf5, 0xba, 0x83, 0x1d, 0xcc, 0x29, 0x46, 0xf8, 0x25, 0xd8, 0xd5, + 0xba, 0x18, 0x6a, 0x5b, 0x04, 0x19, 0x16, 0xa5, 0x81, 0x6b, 0x8f, 0x28, 0x22, 0xc6, 0xb8, 0x91, + 0x56, 0x82, 0xa9, 0xbd, 0x93, 0xc1, 0xf6, 0xab, 0x68, 0x68, 0xdb, 0x3b, 0xc2, 0xca, 0x63, 0x50, + 0x40, 0x43, 0xcb, 0x1d, 0x54, 
0xe4, 0x9a, 0x5c, 0xff, 0xbf, 0xa5, 0x4d, 0x19, 0x2c, 0x1c, 0x76, + 0x2c, 0x77, 0x30, 0x67, 0x50, 0x9c, 0x2c, 0x18, 0xdc, 0x3e, 0xb1, 0x86, 0x83, 0x03, 0x8d, 0x97, + 0x9a, 0x29, 0x60, 0xe5, 0x11, 0x28, 0x4e, 0x90, 0x4d, 0x5c, 0x8a, 0x2a, 0x5b, 0xbc, 0xf7, 0xe6, + 0x9c, 0xc1, 0x18, 0x5a, 0x30, 0xb8, 0x23, 0x9a, 0x22, 0x40, 0x33, 0xe3, 0x23, 0xed, 0x53, 0x0e, + 0xec, 0x76, 0x88, 0xf3, 0x2c, 0x40, 0x16, 0x45, 0xf1, 0x32, 0x8a, 0x01, 0x0a, 0x78, 0xe2, 0xa1, + 0x20, 0x5a, 0x64, 0x2f, 0xd4, 0xe7, 0x40, 0xaa, 0xcf, 0x4b, 0xcd, 0x14, 0xb0, 0x72, 0x08, 0x4a, + 0x7d, 0x4c, 0xe8, 0x9b, 0x51, 0xe0, 0x46, 0x0b, 0xdc, 0x9b, 0x32, 0x58, 0x7c, 0x81, 0x09, 0xed, + 0x9a, 0xed, 0x39, 0x83, 0xc9, 0xf1, 0x82, 0xc1, 0x6b, 0x62, 0x42, 0x8c, 0x68, 0x66, 0x31, 0xfc, + 0xec, 0x06, 0xae, 0xf2, 0x45, 0x06, 0x20, 0xbd, 0xb3, 0x4a, 0xae, 0x96, 0xab, 0x97, 0x9b, 0xb7, + 0x74, 0x71, 0xff, 0xe1, 0x8d, 0xea, 0xe9, 0xa9, 0x3e, 0x6e, 0xe8, 0x4f, 0xe3, 0xaa, 0x45, 0xcf, + 0x18, 0x94, 0xe6, 0x0c, 0x2e, 0xb5, 0x2f, 0x18, 0xdc, 0x15, 0x4a, 0x29, 0xa6, 0x7d, 0xfe, 0x0e, + 0xdb, 0x8e, 0x4b, 0xfb, 0x23, 0x5b, 0x7f, 0x8b, 0x87, 0x06, 0x9f, 0x7c, 0xdf, 0x43, 0x74, 0x82, + 0x83, 0xe3, 0xa8, 0xb2, 0x7c, 0xd7, 0x70, 0xb0, 0xe1, 0xe1, 0x1e, 0x32, 0xe8, 0x89, 0x8f, 0x48, + 0xd6, 0xc6, 0x54, 0x94, 0x98, 0x4b, 0x6a, 0x4a, 0x17, 0xe4, 0x5d, 0xef, 0x08, 0x57, 0xf2, 0x35, + 0xb9, 0x5e, 0x6e, 0xde, 0xd6, 0xd7, 0x87, 0x46, 0x5f, 0x36, 0xbc, 0xb5, 0x1f, 0x6d, 0xcd, 0x3b, + 0x17, 0x0c, 0x96, 0xc5, 0xbe, 0x61, 0xa5, 0x99, 0x1c, 0x3c, 0xc8, 0xff, 0x3c, 0x85, 0x92, 0xb6, + 0x0f, 0xf6, 0x2e, 0xb8, 0x64, 0x22, 0xe2, 0x63, 0x8f, 0x24, 0x1e, 0x76, 0xfd, 0xde, 0x95, 0x87, + 0x97, 0xde, 0xc3, 0xac, 0x4b, 0x89, 0x87, 0x2f, 0xb9, 0x85, 0xcf, 0xd1, 0x00, 0xfd, 0x83, 0x85, + 0x19, 0xa1, 0xec, 0xac, 0x44, 0xe8, 0x63, 0x0e, 0x94, 0xae, 0x32, 0x72, 0xf9, 0x32, 0x52, 0xfa, + 0x70, 0x0a, 0xa5, 0xd0, 0xbe, 0xe6, 0xd7, 0x2d, 0x90, 0xeb, 0x10, 0x47, 0xf1, 0xc0, 0xce, 0xca, + 0xb3, 0x7c, 0xf7, 0x77, 0x62, 0x17, 0xde, 0x86, 0x6a, 0xe3, 0x8f, 
0xa9, 0x71, 0x32, 0x42, 0xbd, + 0x95, 0x27, 0x64, 0x93, 0x5e, 0x96, 0xba, 0x51, 0x6f, 0x7d, 0xe4, 0x43, 0xbd, 0x95, 0xbc, 0x6f, + 0xd2, 0xcb, 0x52, 0x37, 0xea, 0xad, 0x4f, 0x7e, 0xab, 0x7b, 0x36, 0x55, 0xe5, 0xf3, 0xa9, 0x2a, + 0xff, 0x98, 0xaa, 0xf2, 0xfb, 0x99, 0x2a, 0x9d, 0xcf, 0x54, 0xe9, 0xdb, 0x4c, 0x95, 0x5e, 0x3f, + 0xf9, 0x8b, 0xa4, 0xac, 0xfe, 0x59, 0xb0, 0xff, 0xe3, 0x3f, 0xe6, 0x0f, 0x7e, 0x05, 0x00, 0x00, + 0xff, 0xff, 0xe7, 0xfa, 0xf6, 0x34, 0x4d, 0x08, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// MsgClient is the client API for Msg service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MsgClient interface { + // CreateProvider defines a method that creates a provider given the proper inputs + CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) + // UpdateProvider defines a method that updates a provider given the proper inputs + UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) + // DeleteProvider defines a method that deletes a provider given the proper inputs + DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) +} + +type msgClient struct { + cc grpc1.ClientConn +} + +func NewMsgClient(cc grpc1.ClientConn) MsgClient { + return &msgClient{cc} +} + +func (c *msgClient) CreateProvider(ctx context.Context, in *MsgCreateProvider, opts ...grpc.CallOption) (*MsgCreateProviderResponse, error) { + out := new(MsgCreateProviderResponse) + err := c.cc.Invoke(ctx, 
"/akash.provider.v1beta4.Msg/CreateProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) UpdateProvider(ctx context.Context, in *MsgUpdateProvider, opts ...grpc.CallOption) (*MsgUpdateProviderResponse, error) { + out := new(MsgUpdateProviderResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Msg/UpdateProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *msgClient) DeleteProvider(ctx context.Context, in *MsgDeleteProvider, opts ...grpc.CallOption) (*MsgDeleteProviderResponse, error) { + out := new(MsgDeleteProviderResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Msg/DeleteProvider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MsgServer is the server API for Msg service. +type MsgServer interface { + // CreateProvider defines a method that creates a provider given the proper inputs + CreateProvider(context.Context, *MsgCreateProvider) (*MsgCreateProviderResponse, error) + // UpdateProvider defines a method that updates a provider given the proper inputs + UpdateProvider(context.Context, *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) + // DeleteProvider defines a method that deletes a provider given the proper inputs + DeleteProvider(context.Context, *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) +} + +// UnimplementedMsgServer can be embedded to have forward compatible implementations. 
+type UnimplementedMsgServer struct { +} + +func (*UnimplementedMsgServer) CreateProvider(ctx context.Context, req *MsgCreateProvider) (*MsgCreateProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateProvider not implemented") +} +func (*UnimplementedMsgServer) UpdateProvider(ctx context.Context, req *MsgUpdateProvider) (*MsgUpdateProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method UpdateProvider not implemented") +} +func (*UnimplementedMsgServer) DeleteProvider(ctx context.Context, req *MsgDeleteProvider) (*MsgDeleteProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method DeleteProvider not implemented") +} + +func RegisterMsgServer(s grpc1.Server, srv MsgServer) { + s.RegisterService(&_Msg_serviceDesc, srv) +} + +func _Msg_CreateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgCreateProvider) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).CreateProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Msg/CreateProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).CreateProvider(ctx, req.(*MsgCreateProvider)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_UpdateProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgUpdateProvider) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).UpdateProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Msg/UpdateProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return 
srv.(MsgServer).UpdateProvider(ctx, req.(*MsgUpdateProvider)) + } + return interceptor(ctx, in, info, handler) +} + +func _Msg_DeleteProvider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(MsgDeleteProvider) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MsgServer).DeleteProvider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Msg/DeleteProvider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MsgServer).DeleteProvider(ctx, req.(*MsgDeleteProvider)) + } + return interceptor(ctx, in, info, handler) +} + +var _Msg_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.provider.v1beta4.Msg", + HandlerType: (*MsgServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateProvider", + Handler: _Msg_CreateProvider_Handler, + }, + { + MethodName: "UpdateProvider", + Handler: _Msg_UpdateProvider_Handler, + }, + { + MethodName: "DeleteProvider", + Handler: _Msg_DeleteProvider_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/provider/v1beta4/provider.proto", +} + +func (m *ProviderInfo) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProviderInfo) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProviderInfo) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Website) > 0 { + i -= len(m.Website) + copy(dAtA[i:], m.Website) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Website))) + i-- + dAtA[i] = 0x12 + } + if len(m.EMail) > 0 { + i -= len(m.EMail) + copy(dAtA[i:], m.EMail) + i = encodeVarintProvider(dAtA, i, 
uint64(len(m.EMail))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.HostURI) > 0 { + i -= len(m.HostURI) + copy(dAtA[i:], m.HostURI) + i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgCreateProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgCreateProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgCreateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgUpdateProvider) Marshal() (dAtA []byte, err 
error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.HostURI) > 0 { + i -= len(m.HostURI) + copy(dAtA[i:], m.HostURI) + i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgUpdateProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgUpdateProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgUpdateProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProvider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, 
err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProvider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProvider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *MsgDeleteProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MsgDeleteProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MsgDeleteProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + return len(dAtA) - i, nil +} + +func (m *Provider) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Provider) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Provider) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintProvider(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + if len(m.HostURI) > 0 { + i -= 
len(m.HostURI) + copy(dAtA[i:], m.HostURI) + i = encodeVarintProvider(dAtA, i, uint64(len(m.HostURI))) + i-- + dAtA[i] = 0x12 + } + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintProvider(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintProvider(dAtA []byte, offset int, v uint64) int { + offset -= sovProvider(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ProviderInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.EMail) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + l = len(m.Website) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + return n +} + +func (m *MsgCreateProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + l = len(m.HostURI) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovProvider(uint64(l)) + } + } + l = m.Info.Size() + n += 1 + l + sovProvider(uint64(l)) + return n +} + +func (m *MsgCreateProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgUpdateProvider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + l = len(m.HostURI) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovProvider(uint64(l)) + } + } + l = m.Info.Size() + n += 1 + l + sovProvider(uint64(l)) + return n +} + +func (m *MsgUpdateProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *MsgDeleteProvider) Size() (n int) { + if m 
== nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + return n +} + +func (m *MsgDeleteProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + return n +} + +func (m *Provider) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + l = len(m.HostURI) + if l > 0 { + n += 1 + l + sovProvider(uint64(l)) + } + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovProvider(uint64(l)) + } + } + l = m.Info.Size() + n += 1 + l + sovProvider(uint64(l)) + return n +} + +func sovProvider(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozProvider(x uint64) (n int) { + return sovProvider(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ProviderInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProviderInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProviderInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field EMail", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + 
return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.EMail = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Website", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Website = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType 
!= 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := 
m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgCreateProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgCreateProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgCreateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { 
+ return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return 
io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgUpdateProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgUpdateProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgUpdateProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProvider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDeleteProvider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProvider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MsgDeleteProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MsgDeleteProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MsgDeleteProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Provider) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex 
:= iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Provider: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Provider: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field HostURI", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.HostURI = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowProvider + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthProvider + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthProvider + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipProvider(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthProvider + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipProvider(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProvider + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := 
dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProvider + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowProvider + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthProvider + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupProvider + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthProvider + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthProvider = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowProvider = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupProvider = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/query.pb.go b/go/node/provider/v1beta4/query.pb.go new file mode 100644 index 00000000..0b15271e --- /dev/null +++ b/go/node/provider/v1beta4/query.pb.go @@ -0,0 +1,1059 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/provider/v1beta4/query.proto + +package v1beta4 + +import ( + context "context" + fmt "fmt" + query "github.com/cosmos/cosmos-sdk/types/query" + _ "github.com/gogo/protobuf/gogoproto" + grpc1 "github.com/gogo/protobuf/grpc" + proto "github.com/gogo/protobuf/proto" + _ "google.golang.org/genproto/googleapis/api/annotations" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// QueryProvidersRequest is request type for the Query/Providers RPC method +type QueryProvidersRequest struct { + Pagination *query.PageRequest `protobuf:"bytes,1,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProvidersRequest) Reset() { *m = QueryProvidersRequest{} } +func (m *QueryProvidersRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProvidersRequest) ProtoMessage() {} +func (*QueryProvidersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{0} +} +func (m *QueryProvidersRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProvidersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProvidersRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProvidersRequest) XXX_Merge(src 
proto.Message) { + xxx_messageInfo_QueryProvidersRequest.Merge(m, src) +} +func (m *QueryProvidersRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProvidersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProvidersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProvidersRequest proto.InternalMessageInfo + +func (m *QueryProvidersRequest) GetPagination() *query.PageRequest { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +type QueryProvidersResponse struct { + Providers Providers `protobuf:"bytes,1,rep,name=providers,proto3,castrepeated=Providers" json:"providers"` + Pagination *query.PageResponse `protobuf:"bytes,2,opt,name=pagination,proto3" json:"pagination,omitempty"` +} + +func (m *QueryProvidersResponse) Reset() { *m = QueryProvidersResponse{} } +func (m *QueryProvidersResponse) String() string { return proto.CompactTextString(m) } +func (*QueryProvidersResponse) ProtoMessage() {} +func (*QueryProvidersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{1} +} +func (m *QueryProvidersResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProvidersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProvidersResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProvidersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProvidersResponse.Merge(m, src) +} +func (m *QueryProvidersResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryProvidersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProvidersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProvidersResponse proto.InternalMessageInfo + +func (m *QueryProvidersResponse) GetProviders() 
Providers { + if m != nil { + return m.Providers + } + return nil +} + +func (m *QueryProvidersResponse) GetPagination() *query.PageResponse { + if m != nil { + return m.Pagination + } + return nil +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +type QueryProviderRequest struct { + Owner string `protobuf:"bytes,1,opt,name=owner,proto3" json:"owner,omitempty"` +} + +func (m *QueryProviderRequest) Reset() { *m = QueryProviderRequest{} } +func (m *QueryProviderRequest) String() string { return proto.CompactTextString(m) } +func (*QueryProviderRequest) ProtoMessage() {} +func (*QueryProviderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{2} +} +func (m *QueryProviderRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderRequest.Merge(m, src) +} +func (m *QueryProviderRequest) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderRequest proto.InternalMessageInfo + +func (m *QueryProviderRequest) GetOwner() string { + if m != nil { + return m.Owner + } + return "" +} + +// QueryProviderResponse is response type for the Query/Provider RPC method +type QueryProviderResponse struct { + Provider Provider `protobuf:"bytes,1,opt,name=provider,proto3" json:"provider"` +} + +func (m *QueryProviderResponse) Reset() { *m = QueryProviderResponse{} } +func (m *QueryProviderResponse) String() string { return proto.CompactTextString(m) } +func 
(*QueryProviderResponse) ProtoMessage() {} +func (*QueryProviderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_fc667e24f0c91e71, []int{3} +} +func (m *QueryProviderResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *QueryProviderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_QueryProviderResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *QueryProviderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_QueryProviderResponse.Merge(m, src) +} +func (m *QueryProviderResponse) XXX_Size() int { + return m.Size() +} +func (m *QueryProviderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_QueryProviderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_QueryProviderResponse proto.InternalMessageInfo + +func (m *QueryProviderResponse) GetProvider() Provider { + if m != nil { + return m.Provider + } + return Provider{} +} + +func init() { + proto.RegisterType((*QueryProvidersRequest)(nil), "akash.provider.v1beta4.QueryProvidersRequest") + proto.RegisterType((*QueryProvidersResponse)(nil), "akash.provider.v1beta4.QueryProvidersResponse") + proto.RegisterType((*QueryProviderRequest)(nil), "akash.provider.v1beta4.QueryProviderRequest") + proto.RegisterType((*QueryProviderResponse)(nil), "akash.provider.v1beta4.QueryProviderResponse") +} + +func init() { + proto.RegisterFile("akash/provider/v1beta4/query.proto", fileDescriptor_fc667e24f0c91e71) +} + +var fileDescriptor_fc667e24f0c91e71 = []byte{ + // 450 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x93, 0x31, 0x6f, 0xd3, 0x40, + 0x14, 0xc7, 0x7d, 0x81, 0xa2, 0xe6, 0x3a, 0x71, 0x0a, 0x55, 0x15, 0x21, 0x37, 0x18, 0x01, 0x2d, + 0x34, 0x77, 0x4a, 0x60, 0x63, 0xcb, 0x00, 0x6b, 0x6b, 0x89, 0x05, 0x06, 0x74, 0x6e, 0x4f, 0x57, 
+ 0xab, 0xf4, 0x9e, 0xe3, 0x73, 0x12, 0x21, 0xc4, 0xc2, 0x27, 0x40, 0x42, 0x2c, 0x7c, 0x04, 0x66, + 0x46, 0x3e, 0x40, 0xc6, 0x48, 0x2c, 0x4c, 0x80, 0x12, 0x3e, 0x08, 0xf2, 0xdd, 0x39, 0x26, 0x69, + 0x22, 0x67, 0xf3, 0xd9, 0xff, 0xf7, 0x7f, 0xbf, 0xf7, 0x3f, 0x3f, 0x1c, 0xf0, 0x0b, 0xae, 0xcf, + 0x59, 0x92, 0xc2, 0x30, 0x3e, 0x13, 0x29, 0x1b, 0x76, 0x22, 0x91, 0xf1, 0x27, 0xac, 0x3f, 0x10, + 0xe9, 0x5b, 0x9a, 0xa4, 0x90, 0x01, 0xd9, 0x35, 0x1a, 0x5a, 0x68, 0xa8, 0xd3, 0x34, 0x1b, 0x12, + 0x24, 0x18, 0x09, 0xcb, 0x9f, 0xac, 0xba, 0x79, 0x5b, 0x02, 0xc8, 0x37, 0x82, 0xf1, 0x24, 0x66, + 0x5c, 0x29, 0xc8, 0x78, 0x16, 0x83, 0xd2, 0xee, 0xeb, 0xc3, 0x53, 0xd0, 0x97, 0xa0, 0x59, 0xc4, + 0xb5, 0xb0, 0x4d, 0x5c, 0xcb, 0x0e, 0x4b, 0xb8, 0x8c, 0x95, 0x11, 0x3b, 0xed, 0xbd, 0x35, 0x6c, + 0x73, 0x10, 0x23, 0x0b, 0x5e, 0xe3, 0x5b, 0x27, 0xb9, 0xd1, 0xb1, 0x7b, 0xad, 0x43, 0xd1, 0x1f, + 0x08, 0x9d, 0x91, 0x67, 0x18, 0x97, 0x9e, 0x7b, 0xa8, 0x85, 0x0e, 0x76, 0xba, 0xf7, 0xa9, 0x05, + 0xa0, 0x39, 0x00, 0xb5, 0x53, 0x3a, 0x00, 0x7a, 0xcc, 0xa5, 0x70, 0xb5, 0xe1, 0x7f, 0x95, 0xc1, + 0x37, 0x84, 0x77, 0x97, 0x3b, 0xe8, 0x04, 0x94, 0x16, 0xe4, 0x04, 0xd7, 0x0b, 0x1a, 0xbd, 0x87, + 0x5a, 0xd7, 0x0e, 0x76, 0xba, 0x2d, 0xba, 0x3a, 0x2e, 0x5a, 0x54, 0xf7, 0x6e, 0x8e, 0x7f, 0xed, + 0x7b, 0x5f, 0x7f, 0xef, 0xd7, 0x4b, 0xbf, 0xd2, 0x85, 0x3c, 0x5f, 0xa0, 0xae, 0x19, 0xea, 0x07, + 0x95, 0xd4, 0x96, 0x67, 0x01, 0xfb, 0x08, 0x37, 0x16, 0xa8, 0x8b, 0x58, 0x1a, 0x78, 0x0b, 0x46, + 0x4a, 0xa4, 0x26, 0x91, 0x7a, 0x68, 0x0f, 0xc1, 0xab, 0xa5, 0x14, 0xe7, 0x23, 0xf6, 0xf0, 0x76, + 0x01, 0xe7, 0x32, 0xac, 0x9e, 0xf0, 0x7a, 0x3e, 0x61, 0x38, 0xaf, 0xeb, 0x7e, 0xaf, 0xe1, 0x2d, + 0xe3, 0x4e, 0x3e, 0x23, 0x5c, 0x8e, 0x4d, 0xda, 0xeb, 0x9c, 0x56, 0x5e, 0x68, 0x93, 0x6e, 0x2a, + 0xb7, 0xe8, 0xc1, 0xe1, 0x87, 0x1f, 0x7f, 0x3f, 0xd5, 0xee, 0x92, 0x3b, 0xac, 0xe2, 0x4f, 0xd2, + 0xe4, 0x0b, 0xc2, 0xdb, 0x85, 0x01, 0x39, 0xda, 0xa8, 0x4f, 0x41, 0xd5, 0xde, 0x50, 0xed, 0xa0, + 0x3a, 0x06, 0xea, 0x11, 0x39, 0xac, 
0x84, 0x62, 0xef, 0xcc, 0xd5, 0xbc, 0xef, 0xbd, 0x18, 0x4f, + 0x7d, 0x34, 0x99, 0xfa, 0xe8, 0xcf, 0xd4, 0x47, 0x1f, 0x67, 0xbe, 0x37, 0x99, 0xf9, 0xde, 0xcf, + 0x99, 0xef, 0xbd, 0x7c, 0x2a, 0xe3, 0xec, 0x7c, 0x10, 0xd1, 0x53, 0xb8, 0xb4, 0x76, 0x6d, 0x25, + 0xb2, 0x11, 0xa4, 0x17, 0xee, 0x94, 0x2f, 0xa1, 0x04, 0xa6, 0xe0, 0x4c, 0x5c, 0x69, 0x14, 0xdd, + 0x30, 0xfb, 0xf3, 0xf8, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xce, 0x60, 0x15, 0x8e, 0x04, 0x04, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// QueryClient is the client API for Query service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type QueryClient interface { + // Providers queries providers + Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) + // Provider queries provider details + Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) +} + +type queryClient struct { + cc grpc1.ClientConn +} + +func NewQueryClient(cc grpc1.ClientConn) QueryClient { + return &queryClient{cc} +} + +func (c *queryClient) Providers(ctx context.Context, in *QueryProvidersRequest, opts ...grpc.CallOption) (*QueryProvidersResponse, error) { + out := new(QueryProvidersResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Query/Providers", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *queryClient) Provider(ctx context.Context, in *QueryProviderRequest, opts ...grpc.CallOption) (*QueryProviderResponse, error) { + out := new(QueryProviderResponse) + err := c.cc.Invoke(ctx, "/akash.provider.v1beta4.Query/Provider", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// QueryServer is the server API for Query service. +type QueryServer interface { + // Providers queries providers + Providers(context.Context, *QueryProvidersRequest) (*QueryProvidersResponse, error) + // Provider queries provider details + Provider(context.Context, *QueryProviderRequest) (*QueryProviderResponse, error) +} + +// UnimplementedQueryServer can be embedded to have forward compatible implementations. +type UnimplementedQueryServer struct { +} + +func (*UnimplementedQueryServer) Providers(ctx context.Context, req *QueryProvidersRequest) (*QueryProvidersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Providers not implemented") +} +func (*UnimplementedQueryServer) Provider(ctx context.Context, req *QueryProviderRequest) (*QueryProviderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Provider not implemented") +} + +func RegisterQueryServer(s grpc1.Server, srv QueryServer) { + s.RegisterService(&_Query_serviceDesc, srv) +} + +func _Query_Providers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProvidersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Providers(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Query/Providers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Providers(ctx, req.(*QueryProvidersRequest)) + } + return 
interceptor(ctx, in, info, handler) +} + +func _Query_Provider_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(QueryProviderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(QueryServer).Provider(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/akash.provider.v1beta4.Query/Provider", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(QueryServer).Provider(ctx, req.(*QueryProviderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Query_serviceDesc = grpc.ServiceDesc{ + ServiceName: "akash.provider.v1beta4.Query", + HandlerType: (*QueryServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Providers", + Handler: _Query_Providers_Handler, + }, + { + MethodName: "Provider", + Handler: _Query_Provider_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "akash/provider/v1beta4/query.proto", +} + +func (m *QueryProvidersRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProvidersRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProvidersRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProvidersResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != 
nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProvidersResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProvidersResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Pagination != nil { + { + size, err := m.Pagination.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if len(m.Providers) > 0 { + for iNdEx := len(m.Providers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Providers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Owner) > 0 { + i -= len(m.Owner) + copy(dAtA[i:], m.Owner) + i = encodeVarintQuery(dAtA, i, uint64(len(m.Owner))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *QueryProviderResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *QueryProviderResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *QueryProviderResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { 
+ i := len(dAtA) + _ = i + var l int + _ = l + { + size, err := m.Provider.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintQuery(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintQuery(dAtA []byte, offset int, v uint64) int { + offset -= sovQuery(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *QueryProvidersRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProvidersResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Providers) > 0 { + for _, e := range m.Providers { + l = e.Size() + n += 1 + l + sovQuery(uint64(l)) + } + } + if m.Pagination != nil { + l = m.Pagination.Size() + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Owner) + if l > 0 { + n += 1 + l + sovQuery(uint64(l)) + } + return n +} + +func (m *QueryProviderResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Provider.Size() + n += 1 + l + sovQuery(uint64(l)) + return n +} + +func sovQuery(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozQuery(x uint64) (n int) { + return sovQuery(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *QueryProvidersRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProvidersRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProvidersRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageRequest{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProvidersResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProvidersResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProvidersResponse: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Providers", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Providers = append(m.Providers, Provider{}) + if err := m.Providers[len(m.Providers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Pagination", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Pagination == nil { + m.Pagination = &query.PageResponse{} + } + if err := m.Pagination.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { 
+ preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Owner", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Owner = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *QueryProviderResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + 
fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: QueryProviderResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: QueryProviderResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Provider", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowQuery + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthQuery + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthQuery + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Provider.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipQuery(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthQuery + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipQuery(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var 
length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowQuery + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthQuery + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupQuery + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthQuery + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthQuery = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowQuery = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupQuery = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/provider/v1beta4/query.pb.gw.go b/go/node/provider/v1beta4/query.pb.gw.go new file mode 100644 index 00000000..51991e27 --- /dev/null +++ b/go/node/provider/v1beta4/query.pb.gw.go @@ -0,0 +1,272 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: akash/provider/v1beta4/query.proto + +/* +Package v1beta4 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1beta4 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +var ( + filter_Query_Providers_0 = &utilities.DoubleArray{Encoding: map[string]int{}, Base: []int(nil), Check: []int(nil)} +) + +func request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProvidersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Providers(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Providers_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProvidersRequest + var metadata runtime.ServerMetadata + + if err := req.ParseForm(); err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + if err := runtime.PopulateQueryParameters(&protoReq, req.Form, filter_Query_Providers_0); err != nil { + 
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Providers(ctx, &protoReq) + return msg, metadata, err + +} + +func request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, client QueryClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := client.Provider(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_Query_Provider_0(ctx context.Context, marshaler runtime.Marshaler, server QueryServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq QueryProviderRequest + var metadata runtime.ServerMetadata + + var ( + val string + ok bool + err error + _ = err + ) + + val, ok = pathParams["owner"] + if !ok { + return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "owner") + } + + protoReq.Owner, err = runtime.String(val) + + if err != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "owner", err) + } + + msg, err := server.Provider(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterQueryHandlerServer registers the http handlers for service Query to "mux". +// UnaryRPC :call QueryServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. 
+// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterQueryHandlerFromEndpoint instead. +func RegisterQueryHandlerServer(ctx context.Context, mux *runtime.ServeMux, server QueryServer) error { + + mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Providers_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_Query_Provider_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterQueryHandlerFromEndpoint is same as RegisterQueryHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterQueryHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterQueryHandler(ctx, mux, conn) +} + +// RegisterQueryHandler registers the http handlers for service Query to "mux". +// The handlers forward requests to the grpc endpoint over "conn". 
+func RegisterQueryHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterQueryHandlerClient(ctx, mux, NewQueryClient(conn)) +} + +// RegisterQueryHandlerClient registers the http handlers for service Query +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "QueryClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "QueryClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "QueryClient" to call the correct interceptors. +func RegisterQueryHandlerClient(ctx context.Context, mux *runtime.ServeMux, client QueryClient) error { + + mux.Handle("GET", pattern_Query_Providers_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Providers_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Providers_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
+ + }) + + mux.Handle("GET", pattern_Query_Provider_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_Query_Provider_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_Query_Provider_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_Query_Providers_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"akash", "provider", "v1beta4", "providers"}, "", runtime.AssumeColonVerbOpt(true))) + + pattern_Query_Provider_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4}, []string{"akash", "provider", "v1beta4", "providers", "owner"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_Query_Providers_0 = runtime.ForwardResponseMessage + + forward_Query_Provider_0 = runtime.ForwardResponseMessage +) diff --git a/go/node/provider/v1beta4/types.go b/go/node/provider/v1beta4/types.go new file mode 100644 index 00000000..a0589864 --- /dev/null +++ b/go/node/provider/v1beta4/types.go @@ -0,0 +1,67 @@ +package v1beta4 + +import ( + "bytes" + "fmt" + "net/url" + + sdk "github.com/cosmos/cosmos-sdk/types" +) + +// String implements the Stringer interface for a Provider object. 
+func (p Provider) String() string { + res := fmt.Sprintf(`Deployment + Owner: %s + HostURI: %s + Attributes: %v + `, p.Owner, p.HostURI, p.Attributes) + + if !p.Info.IsEmpty() { + res += fmt.Sprintf("Info: %v\n", p.Info) + } + return res +} + +// Providers is the collection of Provider +type Providers []Provider + +// String implements the Stringer interface for a Providers object. +func (obj Providers) String() string { + var buf bytes.Buffer + + const sep = "\n\n" + + for _, p := range obj { + buf.WriteString(p.String()) + buf.WriteString(sep) + } + + if len(obj) > 0 { + buf.Truncate(buf.Len() - len(sep)) + } + + return buf.String() +} + +// Address implements provider and returns owner of provider +func (p *Provider) Address() sdk.AccAddress { + owner, err := sdk.AccAddressFromBech32(p.Owner) + if err != nil { + panic(err) + } + + return owner +} + +func (m ProviderInfo) IsEmpty() bool { + return m.EMail == "" && m.Website == "" +} + +func (m ProviderInfo) Validate() error { + if m.Website != "" { + if _, err := url.Parse(m.Website); err != nil { + return ErrInvalidInfoWebsite + } + } + return nil +} diff --git a/go/node/types/attributes/v1/attribute.go b/go/node/types/attributes/v1/attribute.go new file mode 100644 index 00000000..275a63a9 --- /dev/null +++ b/go/node/types/attributes/v1/attribute.go @@ -0,0 +1,381 @@ +package v1 + +import ( + "path/filepath" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + + "gopkg.in/yaml.v3" + + sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" +) + +const ( + moduleName = "akash" + AttributeNameRegexpStringWildcard = `^([a-zA-Z][\w\/\.\-]{1,126}[\w\*]?)$` + AttributeNameRegexpString = `^([a-zA-Z][\w\/\.\-]{1,126})$` +) + +const ( + errAttributesDuplicateKeys uint32 = iota + 1 + errInvalidAttributeKey +) + +var ( + ErrAttributesDuplicateKeys = sdkerrors.Register(moduleName, errAttributesDuplicateKeys, "attributes cannot have duplicate keys") + ErrInvalidAttributeKey = sdkerrors.Register(moduleName, 
errInvalidAttributeKey, "attribute key does not match regexp") +) + +var ( + attributeNameRegexpWildcard = regexp.MustCompile(AttributeNameRegexpStringWildcard) +) + +/* +Attributes purpose of using this type in favor of Cosmos's sdk.Attribute is +ability to later extend it with operators to support querying on things like +cpu/memory/storage attributes +At this moment type though is same as sdk.Attributes but all akash libraries were +turned to use a new one +*/ +type Attributes []Attribute + +var _ sort.Interface = (*Attributes)(nil) + +type AttributesGroup []Attributes + +type AttributeValue interface { + AsBool() (bool, bool) + AsString() (string, bool) +} + +type attributeValue struct { + value string +} + +func (val attributeValue) AsBool() (bool, bool) { + if val.value == "" { + return false, false + } + + res, err := strconv.ParseBool(val.value) + if err != nil { + return false, false + } + + return res, true +} + +func (val attributeValue) AsString() (string, bool) { + if val.value == "" { + return "", false + } + + return val.value, true +} + +func (m PlacementRequirements) Dup() PlacementRequirements { + return PlacementRequirements{ + SignedBy: m.SignedBy, + Attributes: m.Attributes.Dup(), + } +} + +func NewStringAttribute(key, val string) Attribute { + return Attribute{ + Key: key, + Value: val, + } +} + +func (m *Attribute) String() string { + res, _ := yaml.Marshal(m) + return string(res) +} + +func (m *Attribute) Equal(rhs *Attribute) bool { + return reflect.DeepEqual(m, rhs) +} + +func (m Attribute) SubsetOf(rhs Attribute) bool { + if match, _ := filepath.Match(m.Key, rhs.Key); match && (m.Value == rhs.Value) { + return true + } + + return false +} + +func (attr Attributes) Len() int { + return len(attr) +} + +func (attr Attributes) Swap(i, j int) { + attr[i], attr[j] = attr[j], attr[i] +} + +func (attr Attributes) Less(i, j int) bool { + return attr[i].Key < attr[j].Key +} + +func (attr Attributes) Validate() error { + return 
attr.ValidateWithRegex(attributeNameRegexpWildcard) +} + +func (attr Attributes) ValidateWithRegex(r *regexp.Regexp) error { + store := make(map[string]bool) + + for i := range attr { + if !r.MatchString(attr[i].Key) { + return ErrInvalidAttributeKey + } + + if _, ok := store[attr[i].Key]; ok { + return ErrAttributesDuplicateKeys + } + + store[attr[i].Key] = true + } + + return nil +} + +func (attr Attributes) Dup() Attributes { + if attr == nil { + return nil + } + + res := make(Attributes, 0, len(attr)) + + for _, pair := range attr { + res = append(res, Attribute{ + Key: pair.Key, + Value: pair.Value, + }) + } + + return res +} + +// AttributesSubsetOf check if a is subset of b +// nolint: gofmt +// For example there are two yaml files being converted into these attributes +// example 1: a is subset of b +// --- +// // a +// attributes: +// +// region: +// - us-east-1 +// +// --- +// b +// attributes: +// +// region: +// - us-east-1 +// - us-east-2 +// +// example 2: a is not subset of b +// attributes: +// +// region: +// - us-east-1 +// +// --- +// b +// attributes: +// +// region: +// - us-east-2 +// - us-east-3 +// +// example 3: a is subset of b +// attributes: +// +// region: +// - us-east-2 +// - us-east-3 +// +// --- +// b +// attributes: +// +// region: +// - us-east-2 +func AttributesSubsetOf(a, b Attributes) bool { +loop: + for _, req := range a { + for _, attr := range b { + if req.SubsetOf(attr) { + continue loop + } + } + return false + } + + return true +} + +func AttributesAnyOf(a, b Attributes) bool { + for _, req := range a { + for _, attr := range b { + if req.SubsetOf(attr) { + return true + } + } + } + + return false +} + +func (attr Attributes) SubsetOf(b Attributes) bool { + return AttributesSubsetOf(attr, b) +} + +func (attr Attributes) AnyOf(b Attributes) bool { + return AttributesAnyOf(attr, b) +} + +func (attr Attributes) Find(glob string) AttributeValue { + // todo wildcard + + var val attributeValue + + for i := range attr { + if 
glob == attr[i].Key { + val.value = attr[i].Value + break + } + } + + return val +} + +func (attr Attributes) Iterate(prefix string, fn func(group, key, value string)) { + for _, item := range attr { + if strings.HasPrefix(item.Key, prefix) { + tokens := strings.SplitAfter(item.Key, "/") + tokens = tokens[1:] + fn(tokens[1], tokens[2], item.Value) + } + } +} + +// GetCapabilitiesGroup +// +// example +// capabilities/storage/1/persistent: true +// capabilities/storage/1/class: io1 +// capabilities/storage/2/persistent: false +// +// nolint: gofmt +// returns +// - - persistent: true +// class: nvme +// - - persistent: false +func (attr Attributes) GetCapabilitiesGroup(prefix string) AttributesGroup { + var res AttributesGroup // nolint:prealloc + + groups := make(map[string]Attributes) + + for _, item := range attr { + if !strings.HasPrefix(item.Key, "capabilities/"+prefix) { + continue + } + + tokens := strings.SplitAfter(strings.TrimPrefix(item.Key, "capabilities/"), "/") + // skip malformed attributes. really? 
+ if len(tokens) != 3 { + continue + } + + // filter out prefix name + tokens = tokens[1:] + + group := groups[tokens[0]] + if group == nil { + group = Attributes{} + } + + group = append(group, Attribute{ + Key: tokens[1], + Value: item.Value, + }) + + groups[tokens[0]] = group + } + + for _, group := range groups { + res = append(res, group) + } + + return res +} + +func (attr Attributes) GetCapabilitiesMap(prefix string) AttributesGroup { + res := make(AttributesGroup, 0, 1) + groups := make(Attributes, 0, len(attr)) + + for _, item := range attr { + if !strings.HasPrefix(item.Key, "capabilities/"+prefix) { + continue + } + + tokens := strings.Split(strings.TrimPrefix(item.Key, "capabilities/"), "/") + // skip malformed attributes + if len(tokens) < 3 { + continue + } + + // filter out prefix name + tokens = tokens[1:] + + var key string + for i, token := range tokens { + if i == 0 { + key = token + } else { + key += "/" + token + } + } + + groups = append(groups, Attribute{ + Key: key, + Value: item.Value, + }) + } + + res = append(res, groups) + + return res +} + +// IN check if given attributes are in attributes group +// AttributesGroup for storage +// - persistent: true +// class: beta1 +// - persistent: true +// class: beta2 +// +// that +// - persistent: true +// class: beta1 +func (attr Attributes) IN(group AttributesGroup) bool { + for _, group := range group { + if attr.SubsetOf(group) { + return true + } + } + return false +} + +func (attr Attributes) AnyIN(group AttributesGroup) bool { + for _, group := range group { + if attr.AnyOf(group) { + return true + } + } + return false +} diff --git a/go/node/types/attributes/v1/attribute.pb.go b/go/node/types/attributes/v1/attribute.pb.go new file mode 100644 index 00000000..711fa831 --- /dev/null +++ b/go/node/types/attributes/v1/attribute.pb.go @@ -0,0 +1,812 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: akash/base/attributes/v1/attribute.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Attribute represents key value pair +type Attribute struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty" yaml:"key"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty" yaml:"value"` +} + +func (m *Attribute) Reset() { *m = Attribute{} } +func (*Attribute) ProtoMessage() {} +func (*Attribute) Descriptor() ([]byte, []int) { + return fileDescriptor_44d6ae5d18e0c0a3, []int{0} +} +func (m *Attribute) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Attribute) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Attribute.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Attribute) XXX_Merge(src proto.Message) { + xxx_messageInfo_Attribute.Merge(m, src) +} +func (m *Attribute) XXX_Size() int { + return m.Size() +} +func (m *Attribute) XXX_DiscardUnknown() { + xxx_messageInfo_Attribute.DiscardUnknown(m) +} + +var xxx_messageInfo_Attribute proto.InternalMessageInfo + +// SignedBy represents validation accounts that tenant expects signatures for provider attributes +// AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many +// entries there +// this behaviour to be discussed +type SignedBy struct { + // all_of all keys in this list must have signed attributes + AllOf []string `protobuf:"bytes,1,rep,name=all_of,json=allOf,proto3" json:"all_of" yaml:"allOf"` + // any_of at least of of the keys from the list must have signed attributes + AnyOf []string `protobuf:"bytes,2,rep,name=any_of,json=anyOf,proto3" json:"any_of" yaml:"anyOf"` +} + +func (m *SignedBy) Reset() { *m = SignedBy{} } +func (*SignedBy) ProtoMessage() {} +func (*SignedBy) Descriptor() ([]byte, []int) { + return fileDescriptor_44d6ae5d18e0c0a3, []int{1} +} +func (m *SignedBy) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignedBy) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignedBy.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignedBy) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignedBy.Merge(m, src) +} +func (m *SignedBy) XXX_Size() int { + return m.Size() +} +func (m *SignedBy) XXX_DiscardUnknown() { + xxx_messageInfo_SignedBy.DiscardUnknown(m) +} + +var xxx_messageInfo_SignedBy proto.InternalMessageInfo + +// PlacementRequirements +type PlacementRequirements struct { + // SignedBy list of keys that tenants expect to have signatures from + SignedBy SignedBy `protobuf:"bytes,1,opt,name=signed_by,json=signedBy,proto3" json:"signed_by" yaml:"signed_by"` + // Attribute list of attributes tenant expects from the provider + Attributes Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=Attributes" json:"attributes" yaml:"attributes"` +} + +func (m *PlacementRequirements) Reset() { *m = PlacementRequirements{} } +func (*PlacementRequirements) ProtoMessage() {} +func (*PlacementRequirements) Descriptor() ([]byte, []int) { + 
return fileDescriptor_44d6ae5d18e0c0a3, []int{2} +} +func (m *PlacementRequirements) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PlacementRequirements) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_PlacementRequirements.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *PlacementRequirements) XXX_Merge(src proto.Message) { + xxx_messageInfo_PlacementRequirements.Merge(m, src) +} +func (m *PlacementRequirements) XXX_Size() int { + return m.Size() +} +func (m *PlacementRequirements) XXX_DiscardUnknown() { + xxx_messageInfo_PlacementRequirements.DiscardUnknown(m) +} + +var xxx_messageInfo_PlacementRequirements proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Attribute)(nil), "akash.base.attributes.v1.Attribute") + proto.RegisterType((*SignedBy)(nil), "akash.base.attributes.v1.SignedBy") + proto.RegisterType((*PlacementRequirements)(nil), "akash.base.attributes.v1.PlacementRequirements") +} + +func init() { + proto.RegisterFile("akash/base/attributes/v1/attribute.proto", fileDescriptor_44d6ae5d18e0c0a3) +} + +var fileDescriptor_44d6ae5d18e0c0a3 = []byte{ + // 415 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xb1, 0x8e, 0xd3, 0x30, + 0x1c, 0xc6, 0x93, 0x1e, 0x77, 0xba, 0xf8, 0x10, 0x3a, 0x22, 0x90, 0xa2, 0x1b, 0xec, 0xca, 0x08, + 0xd4, 0x85, 0x58, 0x3d, 0xc4, 0x72, 0x03, 0x12, 0x79, 0x01, 0x50, 0xd8, 0x60, 0x38, 0x9c, 0xab, + 0x9b, 0x46, 0x49, 0xe3, 0x12, 0x3b, 0x41, 0x61, 0x82, 0x8d, 0x91, 0x47, 0x60, 0xe6, 0x49, 0x3a, + 0x76, 0xec, 0x14, 0x20, 0xdd, 0x3a, 0xe6, 0x09, 0x50, 0x9c, 0x34, 0x09, 0x43, 0x27, 0xdb, 0x9f, + 0x7f, 0x9f, 0xff, 0xdf, 0xdf, 0x36, 0x98, 0xd0, 0x90, 0x8a, 0x05, 0xf1, 0xa8, 0x60, 0x84, 0x4a, + 0x99, 0x04, 0x5e, 0x2a, 0x99, 0x20, 0xd9, 0xb4, 0x5f, 0xd9, 0xab, 0x84, 
0x4b, 0x6e, 0x5a, 0x8a, + 0xb4, 0x6b, 0xd2, 0xee, 0x49, 0x3b, 0x9b, 0x5e, 0x3d, 0xf2, 0xb9, 0xcf, 0x15, 0x44, 0xea, 0x59, + 0xc3, 0xe3, 0x0f, 0xc0, 0x78, 0x7d, 0xc0, 0xcc, 0x31, 0x38, 0x09, 0x59, 0x6e, 0xe9, 0x63, 0x7d, + 0x62, 0x38, 0x0f, 0xaa, 0x02, 0x81, 0x9c, 0x2e, 0xa3, 0x1b, 0x1c, 0xb2, 0x1c, 0xbb, 0xf5, 0x96, + 0xf9, 0x0c, 0x9c, 0x66, 0x34, 0x4a, 0x99, 0x35, 0x52, 0xcc, 0x65, 0x55, 0xa0, 0xfb, 0x0d, 0xa3, + 0x64, 0xec, 0x36, 0xdb, 0x37, 0xf7, 0xbe, 0xff, 0x44, 0x1a, 0xce, 0xc0, 0xf9, 0xbb, 0xc0, 0x8f, + 0xd9, 0xcc, 0xc9, 0xcd, 0x29, 0x38, 0xa3, 0x51, 0x74, 0xcb, 0xe7, 0x96, 0x3e, 0x3e, 0x99, 0x18, + 0xce, 0xd5, 0xbe, 0x40, 0xad, 0xd2, 0x1f, 0x42, 0xa3, 0xe8, 0xcd, 0x1c, 0xbb, 0xa7, 0x6a, 0x54, + 0x96, 0x38, 0xaf, 0x2d, 0xa3, 0x81, 0x45, 0x29, 0x03, 0x4b, 0x9c, 0x37, 0x96, 0x7a, 0x6c, 0xeb, + 0x7e, 0x1b, 0x81, 0xc7, 0x6f, 0x23, 0x7a, 0xc7, 0x96, 0x2c, 0x96, 0x2e, 0xfb, 0x94, 0x06, 0x89, + 0x9a, 0x0a, 0x73, 0x0e, 0x0c, 0xa1, 0x12, 0xdd, 0x7a, 0x4d, 0x9f, 0x17, 0xd7, 0xd8, 0x3e, 0x76, + 0x65, 0xf6, 0x21, 0xbc, 0xf3, 0x74, 0x5d, 0x20, 0x6d, 0x5f, 0xa0, 0xde, 0x5c, 0x15, 0xe8, 0xb2, + 0x09, 0xd0, 0x49, 0xd8, 0x3d, 0x17, 0x87, 0x6e, 0xbf, 0x00, 0xd0, 0x1f, 0xa5, 0xe2, 0x5f, 0x5c, + 0x3f, 0x39, 0x5e, 0xa8, 0x7b, 0x02, 0xe7, 0x65, 0x5b, 0x69, 0x60, 0xaf, 0x0a, 0xf4, 0xb0, 0xed, + 0xb5, 0xd3, 0xf0, 0xaf, 0xdf, 0x08, 0x74, 0x2e, 0xe1, 0x0e, 0xf0, 0xe6, 0x0e, 0x9c, 0x8f, 0xdb, + 0xbf, 0x50, 0xfb, 0x5a, 0x42, 0x6d, 0x5d, 0x42, 0x7d, 0x53, 0x42, 0xfd, 0x4f, 0x09, 0xf5, 0x1f, + 0x3b, 0xa8, 0x6d, 0x76, 0x50, 0xdb, 0xee, 0xa0, 0xf6, 0xfe, 0x95, 0x1f, 0xc8, 0x45, 0xea, 0xd9, + 0x77, 0x7c, 0x49, 0x54, 0xb2, 0xe7, 0x31, 0x93, 0x9f, 0x79, 0x12, 0xb6, 0x2b, 0xba, 0x0a, 0x88, + 0xcf, 0x49, 0xcc, 0x67, 0x8c, 0xc8, 0x7c, 0xc5, 0xc4, 0xff, 0x5f, 0xcf, 0x3b, 0x53, 0x3f, 0xe8, + 0xc5, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x6b, 0xc3, 0xa9, 0xd1, 0x9d, 0x02, 0x00, 0x00, +} + +func (m *Attribute) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err 
:= m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Attribute) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Attribute) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Value) > 0 { + i -= len(m.Value) + copy(dAtA[i:], m.Value) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.Value))) + i-- + dAtA[i] = 0x12 + } + if len(m.Key) > 0 { + i -= len(m.Key) + copy(dAtA[i:], m.Key) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.Key))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SignedBy) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignedBy) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignedBy) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.AnyOf) > 0 { + for iNdEx := len(m.AnyOf) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AnyOf[iNdEx]) + copy(dAtA[i:], m.AnyOf[iNdEx]) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.AnyOf[iNdEx]))) + i-- + dAtA[i] = 0x12 + } + } + if len(m.AllOf) > 0 { + for iNdEx := len(m.AllOf) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.AllOf[iNdEx]) + copy(dAtA[i:], m.AllOf[iNdEx]) + i = encodeVarintAttribute(dAtA, i, uint64(len(m.AllOf[iNdEx]))) + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *PlacementRequirements) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PlacementRequirements) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return 
m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PlacementRequirements) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAttribute(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.SignedBy.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintAttribute(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintAttribute(dAtA []byte, offset int, v uint64) int { + offset -= sovAttribute(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Attribute) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Key) + if l > 0 { + n += 1 + l + sovAttribute(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAttribute(uint64(l)) + } + return n +} + +func (m *SignedBy) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.AllOf) > 0 { + for _, s := range m.AllOf { + l = len(s) + n += 1 + l + sovAttribute(uint64(l)) + } + } + if len(m.AnyOf) > 0 { + for _, s := range m.AnyOf { + l = len(s) + n += 1 + l + sovAttribute(uint64(l)) + } + } + return n +} + +func (m *PlacementRequirements) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.SignedBy.Size() + n += 1 + l + sovAttribute(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovAttribute(uint64(l)) + } + } + return n +} + +func sovAttribute(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozAttribute(x uint64) (n int) { + return sovAttribute(uint64((x << 1) ^ uint64((int64(x) >> 
63)))) +} +func (m *Attribute) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Attribute: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Attribute: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + 
m.Value = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAttribute(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAttribute + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SignedBy) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignedBy: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignedBy: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AllOf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AllOf = append(m.AllOf, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field AnyOf", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.AnyOf = append(m.AnyOf, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAttribute(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAttribute + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PlacementRequirements) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PlacementRequirements: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PlacementRequirements: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SignedBy", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := 
iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.SignedBy.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAttribute + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthAttribute + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthAttribute + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAttribute(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthAttribute + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAttribute(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAttribute + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAttribute + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + 
iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAttribute + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthAttribute + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupAttribute + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthAttribute + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthAttribute = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAttribute = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupAttribute = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/attributes/v1/attribute_test.go b/go/node/types/attributes/v1/attribute_test.go new file mode 100644 index 00000000..113b1d7b --- /dev/null +++ b/go/node/types/attributes/v1/attribute_test.go @@ -0,0 +1,182 @@ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +type regexTest struct { + runName string + key string + shouldPass bool +} + +func TestAttributes_Validate(t *testing.T) { + attr := Attributes{ + {Key: "key"}, + {Key: "key"}, + } + + require.EqualError(t, attr.Validate(), ErrAttributesDuplicateKeys.Error()) + + // unsupported key symbol + attr = Attributes{ + {Key: "$"}, + } + + require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) + + // empty key + attr = Attributes{ + {Key: ""}, + } + + require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) + // to long key + attr = Attributes{ + {Key: 
"sdgkhaeirugaeroigheirghseiargfs3ssdgkhaeirugaeroigheirghseiargfs3sdgkhaeirugaeroigheirghseiargfs3ssdgkhaeirugaeroigheirghseiargfs3"}, + } + + require.EqualError(t, attr.Validate(), ErrInvalidAttributeKey.Error()) +} + +func TestAttribute_Equal(t *testing.T) { + attr1 := &Attribute{Key: "key1", Value: "val1"} + attr2 := &Attribute{Key: "key1", Value: "val1"} + attr3 := &Attribute{Key: "key1", Value: "val2"} + + require.True(t, attr1.Equal(attr2)) + require.False(t, attr1.Equal(attr3)) +} + +func TestAttribute_SubsetOf(t *testing.T) { + attr1 := Attribute{Key: "key1", Value: "val1"} + attr2 := Attribute{Key: "key1", Value: "val1"} + attr3 := Attribute{Key: "key1", Value: "val2"} + + require.True(t, attr1.SubsetOf(attr2)) + require.False(t, attr1.SubsetOf(attr3)) +} + +func TestAttribute_AnyOf(t *testing.T) { + attr1 := Attribute{Key: "key1", Value: "val1"} + attr2 := Attribute{Key: "key1", Value: "val1"} + attr3 := Attribute{Key: "key1", Value: "val2"} + + require.True(t, attr1.SubsetOf(attr2)) + require.False(t, attr1.SubsetOf(attr3)) +} + +func TestAttributes_SubsetOf(t *testing.T) { + attr1 := Attributes{ + {Key: "key1", Value: "val1"}, + } + + attr2 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + } + + attr3 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + attr4 := Attributes{ + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + require.True(t, attr1.SubsetOf(attr2)) + require.True(t, attr2.SubsetOf(attr3)) + require.False(t, attr1.SubsetOf(attr4)) +} + +func TestAttributes_AnyOf(t *testing.T) { + attr1 := Attributes{ + {Key: "key1", Value: "val1"}, + } + + attr2 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + } + + attr3 := Attributes{ + {Key: "key1", Value: "val1"}, + {Key: "key2", Value: "val2"}, + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + attr4 := 
Attributes{ + {Key: "key3", Value: "val3"}, + {Key: "key4", Value: "val4"}, + } + + require.True(t, attr1.AnyOf(attr2)) + require.True(t, attr2.AnyOf(attr1)) + require.True(t, attr2.AnyOf(attr3)) + require.False(t, attr1.AnyOf(attr4)) +} + +func TestAttributeRegex(t *testing.T) { + tests := []regexTest{ + { + "arbitrary key", + "key1", + true, + }, + { + "allow trailing wildcard", + "key1*", + true, + }, + { + "allow trailing wildcard", + "key1/*", + true, + }, + { + "leading wildcard is not allowed", + "*key1", + false, + }, + { + "multiple wildcards are not allowed", + "key1**", + false, + }, + { + "wildcards in the middle of key are not allowed", + "key1*/", + false, + }, + { + "wildcards in the middle of key are not allowed", + "key1/*/", + false, + }, + } + + for _, test := range tests { + t.Run(test.runName, func(t *testing.T) { + require.Equal(t, test.shouldPass, attributeNameRegexpWildcard.MatchString(test.key)) + }) + } +} + +func TestAttributes_Dup(t *testing.T) { + attrs := Attributes{ + Attribute{ + Key: "key", + Value: "val", + }, + } + + dAttrs := attrs.Dup() + require.Equal(t, attrs, dAttrs) +} diff --git a/go/node/types/attributes/v1/migrate/v1beta3.go b/go/node/types/attributes/v1/migrate/v1beta3.go new file mode 100644 index 00000000..23aea01e --- /dev/null +++ b/go/node/types/attributes/v1/migrate/v1beta3.go @@ -0,0 +1,35 @@ +package migrate + +import ( + "github.com/akash-network/akash-api/go/node/types/attributes/v1" + "github.com/akash-network/akash-api/go/node/types/v1beta3" +) + +func AttributesFromV1Beta3(from v1beta3.Attributes) v1.Attributes { + res := make(v1.Attributes, 0, len(from)) + + for _, attr := range from { + res = append(res, v1.Attribute{ + Key: attr.Key, + Value: attr.Value, + }) + } + + return res +} + +func SignedByFromV1Beta3(from v1beta3.SignedBy) v1.SignedBy { + return v1.SignedBy{ + AllOf: from.AllOf, + AnyOf: from.AnyOf, + } +} + +func PlacementRequirementsFromV1Beta3(from v1beta3.PlacementRequirements) 
v1.PlacementRequirements { + res := v1.PlacementRequirements{ + SignedBy: SignedByFromV1Beta3(from.SignedBy), + Attributes: AttributesFromV1Beta3(from.Attributes), + } + + return res +} diff --git a/go/node/types/resources/v1/cpu.pb.go b/go/node/types/resources/v1/cpu.pb.go new file mode 100644 index 00000000..3439b7a1 --- /dev/null +++ b/go/node/types/resources/v1/cpu.pb.go @@ -0,0 +1,424 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1/cpu.proto + +package v1 + +import ( + fmt "fmt" + github_com_akash_network_akash_api_go_node_types_attributes_v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// CPU stores resource units and cpu config attributes +type CPU struct { + Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *CPU) Reset() { *m = CPU{} } +func (m *CPU) String() string { return proto.CompactTextString(m) } +func (*CPU) ProtoMessage() {} +func (*CPU) Descriptor() ([]byte, []int) { + return fileDescriptor_6eb24b6af655c62d, []int{0} +} +func (m *CPU) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *CPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_CPU.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *CPU) XXX_Merge(src proto.Message) { + xxx_messageInfo_CPU.Merge(m, src) +} +func (m *CPU) XXX_Size() int { + return m.Size() +} +func (m *CPU) XXX_DiscardUnknown() { + xxx_messageInfo_CPU.DiscardUnknown(m) +} + +var xxx_messageInfo_CPU proto.InternalMessageInfo + +func (m *CPU) GetUnits() ResourceValue { + if m != nil { + return m.Units + } + return ResourceValue{} +} + +func (m *CPU) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*CPU)(nil), "akash.base.resources.v1.CPU") +} + +func init() { proto.RegisterFile("akash/base/resources/v1/cpu.proto", fileDescriptor_6eb24b6af655c62d) } + +var fileDescriptor_6eb24b6af655c62d = []byte{ + // 317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0xd4, 0x4f, 0x2e, 0x28, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x07, + 0x2b, 0xd1, 0x03, 0x29, 0xd1, 0x83, 0x2b, 0xd1, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, + 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0xa5, 0x34, 0x90, 0x4c, 0x4c, 0x2c, 0x29, 0x29, 0xca, + 0x4c, 0x2a, 0x2d, 0x81, 0x18, 0x09, 0xe7, 0x41, 0x55, 0x6a, 0xe3, 0xb2, 0x1b, 0xc6, 0x29, 0x4b, + 0xcc, 0x29, 0x85, 0x2a, 0x56, 0x5a, 0xc0, 0xc4, 0xc5, 0xec, 0x1c, 0x10, 0x2a, 0xe4, 0xc4, 0xc5, + 0x5a, 0x9a, 0x97, 0x59, 0x52, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, 0xa6, 0x87, 0xc3, + 0x75, 0x7a, 0x41, 0x50, 0x4e, 0x18, 0xc8, 0x10, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0x20, + 0x5a, 0x85, 0xce, 0x31, 0x72, 0x71, 0x21, 0x9c, 0x26, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, + 0x8c, 0x6c, 0x12, 0x42, 0x16, 0x64, 0x94, 0x23, 0x8c, 0xe7, 0xd4, 0xc1, 0x08, 0x32, 0xe7, 0xd5, + 0x3d, 0x79, 0x11, 0x84, 0x0a, 0x9d, 0xfc, 0xdc, 0xcc, 0x92, 0xd4, 0xdc, 0x82, 0x92, 0xca, 0x4f, + 0xf7, 0xe4, 0xa5, 0x2b, 0x13, 0x73, 0x73, 0xac, 0x94, 0xb0, 0xc9, 0x2a, 0xad, 0xba, 0x2f, 0xef, + 0x99, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0xb6, 0x4e, 0x37, 0x2f, + 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, + 0x4f, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x46, 0x0d, 0x42, 0x84, 0x4b, 0x8a, 0x83, 0x90, + 0x7c, 0x60, 0xc5, 0xf2, 0x62, 0x81, 0x3c, 0xa3, 0x53, 0xf8, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, + 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, + 0x1e, 0xcb, 0x31, 0x44, 0xd9, 0x92, 0x6c, 0x2d, 0x72, 0x7c, 0x24, 0xb1, 0x81, 0xa3, 0xc0, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0x20, 0x2f, 0xd6, 0x22, 0x2d, 0x02, 0x00, 0x00, +} + +func (this *CPU) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + 
that1, ok := that.(*CPU) + if !ok { + that2, ok := that.(CPU) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Units.Equal(&that1.Units) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *CPU) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CPU) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *CPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintCpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintCpu(dAtA []byte, offset int, v uint64) int { + offset -= sovCpu(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *CPU) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Units.Size() + n += 1 + l + sovCpu(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovCpu(uint64(l)) + } + } + return n +} + +func sovCpu(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozCpu(x uint64) (n int) { + return sovCpu(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *CPU) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CPU: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CPU: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowCpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthCpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthCpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, 
v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipCpu(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthCpu + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipCpu(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowCpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthCpu + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupCpu + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthCpu + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthCpu = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowCpu = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupCpu 
= fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1/endpoint.go b/go/node/types/resources/v1/endpoint.go new file mode 100644 index 00000000..81434b44 --- /dev/null +++ b/go/node/types/resources/v1/endpoint.go @@ -0,0 +1,29 @@ +package v1 + +import ( + "sort" +) + +type Endpoints []Endpoint + +var _ sort.Interface = (*Endpoints)(nil) + +func (u Endpoints) Dup() Endpoints { + res := make(Endpoints, len(u)) + + copy(res, u) + + return res +} + +func (u Endpoints) Len() int { + return len(u) +} + +func (u Endpoints) Swap(i, j int) { + u[i], u[j] = u[j], u[i] +} + +func (u Endpoints) Less(i, j int) bool { + return u[i].SequenceNumber < u[j].SequenceNumber +} diff --git a/go/node/types/resources/v1/endpoint.pb.go b/go/node/types/resources/v1/endpoint.pb.go new file mode 100644 index 00000000..4cb76894 --- /dev/null +++ b/go/node/types/resources/v1/endpoint.pb.go @@ -0,0 +1,408 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1/endpoint.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// This describes how the endpoint is implemented when the lease is deployed +type Endpoint_Kind int32 + +const ( + // Describes an endpoint that becomes a Kubernetes Ingress + Endpoint_SHARED_HTTP Endpoint_Kind = 0 + // Describes an endpoint that becomes a Kubernetes NodePort + Endpoint_RANDOM_PORT Endpoint_Kind = 1 + // Describes an endpoint that becomes a leased IP + Endpoint_LEASED_IP Endpoint_Kind = 2 +) + +var Endpoint_Kind_name = map[int32]string{ + 0: "SHARED_HTTP", + 1: "RANDOM_PORT", + 2: "LEASED_IP", +} + +var Endpoint_Kind_value = map[string]int32{ + "SHARED_HTTP": 0, + "RANDOM_PORT": 1, + "LEASED_IP": 2, +} + +func (x Endpoint_Kind) String() string { + return proto.EnumName(Endpoint_Kind_name, int32(x)) +} + +func (Endpoint_Kind) EnumDescriptor() ([]byte, []int) { + return fileDescriptor_b997d618e77e1ab4, []int{0, 0} +} + +// Endpoint describes a publicly accessible IP service +type Endpoint struct { + Kind Endpoint_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=akash.base.resources.v1.Endpoint_Kind" json:"kind,omitempty"` + SequenceNumber uint32 `protobuf:"varint,2,opt,name=sequence_number,json=sequenceNumber,proto3" json:"sequence_number" yaml:"sequence_number"` +} + +func (m *Endpoint) Reset() { *m = Endpoint{} } +func (m *Endpoint) String() string { return proto.CompactTextString(m) } +func (*Endpoint) ProtoMessage() {} +func (*Endpoint) Descriptor() ([]byte, []int) { + return fileDescriptor_b997d618e77e1ab4, []int{0} +} +func (m *Endpoint) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Endpoint) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Endpoint.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Endpoint) XXX_Merge(src proto.Message) { + 
xxx_messageInfo_Endpoint.Merge(m, src) +} +func (m *Endpoint) XXX_Size() int { + return m.Size() +} +func (m *Endpoint) XXX_DiscardUnknown() { + xxx_messageInfo_Endpoint.DiscardUnknown(m) +} + +var xxx_messageInfo_Endpoint proto.InternalMessageInfo + +func (m *Endpoint) GetKind() Endpoint_Kind { + if m != nil { + return m.Kind + } + return Endpoint_SHARED_HTTP +} + +func (m *Endpoint) GetSequenceNumber() uint32 { + if m != nil { + return m.SequenceNumber + } + return 0 +} + +func init() { + proto.RegisterEnum("akash.base.resources.v1.Endpoint_Kind", Endpoint_Kind_name, Endpoint_Kind_value) + proto.RegisterType((*Endpoint)(nil), "akash.base.resources.v1.Endpoint") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1/endpoint.proto", fileDescriptor_b997d618e77e1ab4) +} + +var fileDescriptor_b997d618e77e1ab4 = []byte{ + // 326 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4b, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0xd4, 0x4f, 0xcd, 0x4b, 0x29, 0xc8, 0xcf, 0xcc, 0x2b, 0xd1, 0x2b, 0x28, 0xca, + 0x2f, 0xc9, 0x17, 0x12, 0x07, 0xab, 0xd3, 0x03, 0xa9, 0xd3, 0x83, 0xab, 0xd3, 0x2b, 0x33, 0x94, + 0x12, 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0x95, 0xde, 0x33, 0x72, + 0x71, 0xb8, 0x42, 0x4d, 0x10, 0xb2, 0xe2, 0x62, 0xc9, 0xce, 0xcc, 0x4b, 0x91, 0x60, 0x54, 0x60, + 0xd4, 0xe0, 0x33, 0x52, 0xd3, 0xc3, 0x61, 0x94, 0x1e, 0x4c, 0x83, 0x9e, 0x77, 0x66, 0x5e, 0x4a, + 0x10, 0x58, 0x8f, 0x50, 0x06, 0x17, 0x7f, 0x71, 0x6a, 0x61, 0x69, 0x6a, 0x5e, 0x72, 0x6a, 0x7c, + 0x5e, 0x69, 0x6e, 0x52, 0x6a, 0x91, 0x04, 0x93, 0x02, 0xa3, 0x06, 0xaf, 0x93, 0xfd, 0xa3, 0x7b, + 0xf2, 0x7c, 0xc1, 0x50, 0x29, 0x3f, 0xb0, 0xcc, 0xab, 0x7b, 0xf2, 0xe8, 0x8a, 0x3f, 0xdd, 0x93, + 0x17, 0xab, 0x4c, 0xcc, 0xcd, 0xb1, 0x52, 0x42, 0x93, 0x50, 0x0a, 0xe2, 0x2b, 0x46, 0xd1, 0xac, + 0x64, 0xce, 0xc5, 0x02, 0xb2, 
0x57, 0x88, 0x9f, 0x8b, 0x3b, 0xd8, 0xc3, 0x31, 0xc8, 0xd5, 0x25, + 0xde, 0x23, 0x24, 0x24, 0x40, 0x80, 0x01, 0x24, 0x10, 0xe4, 0xe8, 0xe7, 0xe2, 0xef, 0x1b, 0x1f, + 0xe0, 0x1f, 0x14, 0x22, 0xc0, 0x28, 0xc4, 0xcb, 0xc5, 0xe9, 0xe3, 0xea, 0x18, 0xec, 0xea, 0x12, + 0xef, 0x19, 0x20, 0xc0, 0x64, 0xc5, 0xf2, 0x62, 0x81, 0x3c, 0xa3, 0x53, 0xf8, 0x89, 0x47, 0x72, + 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, + 0x72, 0x0c, 0x37, 0x1e, 0xcb, 0x31, 0x44, 0xd9, 0xa6, 0x67, 0x96, 0x64, 0x94, 0x26, 0xe9, 0x25, + 0xe7, 0xe7, 0xea, 0x83, 0xbd, 0xae, 0x9b, 0x97, 0x5a, 0x52, 0x9e, 0x5f, 0x94, 0x0d, 0xe5, 0x25, + 0x16, 0x64, 0xea, 0xa7, 0xe7, 0xeb, 0xe7, 0xe5, 0xa7, 0xa4, 0xea, 0x97, 0x54, 0x16, 0xa4, 0x16, + 0xa3, 0x44, 0x44, 0x12, 0x1b, 0x38, 0x44, 0x8d, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0xf2, 0x63, + 0xc6, 0xf2, 0xaa, 0x01, 0x00, 0x00, +} + +func (this *Endpoint) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Endpoint) + if !ok { + that2, ok := that.(Endpoint) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.SequenceNumber != that1.SequenceNumber { + return false + } + return true +} +func (m *Endpoint) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Endpoint) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Endpoint) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.SequenceNumber != 0 { + i = encodeVarintEndpoint(dAtA, i, uint64(m.SequenceNumber)) + i-- + dAtA[i] = 0x10 + } + if m.Kind != 0 { + i = encodeVarintEndpoint(dAtA, i, 
uint64(m.Kind)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintEndpoint(dAtA []byte, offset int, v uint64) int { + offset -= sovEndpoint(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Endpoint) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovEndpoint(uint64(m.Kind)) + } + if m.SequenceNumber != 0 { + n += 1 + sovEndpoint(uint64(m.SequenceNumber)) + } + return n +} + +func sovEndpoint(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozEndpoint(x uint64) (n int) { + return sovEndpoint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Endpoint) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEndpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Endpoint: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Endpoint: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEndpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= Endpoint_Kind(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SequenceNumber", wireType) + } + m.SequenceNumber = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowEndpoint + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SequenceNumber |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipEndpoint(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthEndpoint + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEndpoint(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEndpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEndpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEndpoint + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthEndpoint + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupEndpoint + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthEndpoint + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthEndpoint = fmt.Errorf("proto: negative length found during unmarshaling") 
+ ErrIntOverflowEndpoint = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupEndpoint = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1/gpu.pb.go b/go/node/types/resources/v1/gpu.pb.go new file mode 100644 index 00000000..beb083b7 --- /dev/null +++ b/go/node/types/resources/v1/gpu.pb.go @@ -0,0 +1,424 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1/gpu.proto + +package v1 + +import ( + fmt "fmt" + github_com_akash_network_akash_api_go_node_types_attributes_v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// GPU stores resource units and cpu config attributes +type GPU struct { + Units ResourceValue `protobuf:"bytes,1,opt,name=units,proto3" json:"units"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *GPU) Reset() { *m = GPU{} } +func (m *GPU) String() string { return proto.CompactTextString(m) } +func (*GPU) ProtoMessage() {} +func (*GPU) Descriptor() ([]byte, []int) { + return fileDescriptor_c4d2da51015d61b2, []int{0} +} +func (m *GPU) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *GPU) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_GPU.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *GPU) XXX_Merge(src proto.Message) { + xxx_messageInfo_GPU.Merge(m, src) +} +func (m *GPU) XXX_Size() int { + return m.Size() +} +func (m *GPU) XXX_DiscardUnknown() { + xxx_messageInfo_GPU.DiscardUnknown(m) +} + +var xxx_messageInfo_GPU proto.InternalMessageInfo + +func (m *GPU) GetUnits() ResourceValue { + if m != nil { + return m.Units + } + return ResourceValue{} +} + +func (m *GPU) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*GPU)(nil), "akash.base.resources.v1.GPU") +} + +func init() { proto.RegisterFile("akash/base/resources/v1/gpu.proto", fileDescriptor_c4d2da51015d61b2) } + +var fileDescriptor_c4d2da51015d61b2 = []byte{ + // 317 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x02, 0xff, 0xe2, 0x52, 0x4c, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0xd4, 0x4f, 0x2f, 0x28, 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x07, + 0x2b, 0xd1, 0x03, 0x29, 0xd1, 0x83, 0x2b, 0xd1, 0x2b, 0x33, 0x94, 0x12, 0x49, 0xcf, 0x4f, 0xcf, + 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0xa5, 0x34, 0x90, 0x4c, 0x4c, 0x2c, 0x29, 0x29, 0xca, + 0x4c, 0x2a, 0x2d, 0x81, 0x18, 0x09, 0xe7, 0x41, 0x55, 0x6a, 0xe3, 0xb2, 0x1b, 0xc6, 0x29, 0x4b, + 0xcc, 0x29, 0x85, 0x2a, 0x56, 0x5a, 0xc0, 0xc4, 0xc5, 0xec, 0x1e, 0x10, 0x2a, 0xe4, 0xc4, 0xc5, + 0x5a, 0x9a, 0x97, 0x59, 0x52, 0x2c, 0xc1, 0xa8, 0xc0, 0xa8, 0xc1, 0x6d, 0xa4, 0xa6, 0x87, 0xc3, + 0x75, 0x7a, 0x41, 0x50, 0x4e, 0x18, 0xc8, 0x10, 0x27, 0x96, 0x13, 0xf7, 0xe4, 0x19, 0x82, 0x20, + 0x5a, 0x85, 0xce, 0x31, 0x72, 0x71, 0x21, 0x9c, 0x26, 0xc1, 0xa4, 0xc0, 0xac, 0xc1, 0x6d, 0xa4, + 0x8c, 0x6c, 0x12, 0x42, 0x16, 0x64, 0x94, 0x23, 0x8c, 0xe7, 0xd4, 0xc1, 0x08, 0x32, 0xe7, 0xd5, + 0x3d, 0x79, 0x11, 0x84, 0x0a, 0x9d, 0xfc, 0xdc, 0xcc, 0x92, 0xd4, 0xdc, 0x82, 0x92, 0xca, 0x4f, + 0xf7, 0xe4, 0xa5, 0x2b, 0x13, 0x73, 0x73, 0xac, 0x94, 0xb0, 0xc9, 0x2a, 0xad, 0xba, 0x2f, 0xef, + 0x99, 0x9e, 0x59, 0x92, 0x51, 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x0f, 0xb6, 0x4e, 0x37, 0x2f, + 0xb5, 0xa4, 0x3c, 0xbf, 0x28, 0x1b, 0xca, 0x4b, 0x2c, 0xc8, 0xd4, 0x4f, 0xcf, 0xd7, 0xcf, 0xcb, + 0x4f, 0x49, 0xd5, 0x2f, 0xa9, 0x2c, 0x48, 0x2d, 0x46, 0x0d, 0x42, 0x84, 0x4b, 0x8a, 0x83, 0x90, + 0x7c, 0x60, 0xc5, 0xf2, 0x62, 0x81, 0x3c, 0xa3, 0x53, 0xf8, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, + 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0x38, 0xe1, 0xb1, 0x1c, 0xc3, 0x85, 0xc7, 0x72, 0x0c, 0x37, + 0x1e, 0xcb, 0x31, 0x44, 0xd9, 0x92, 0x6c, 0x2d, 0x72, 0x7c, 0x24, 0xb1, 0x81, 0xa3, 0xc0, 0x18, + 0x10, 0x00, 0x00, 0xff, 0xff, 0xab, 0x07, 0xd2, 0x41, 0x2d, 0x02, 0x00, 0x00, +} + +func (this *GPU) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + 
that1, ok := that.(*GPU) + if !ok { + that2, ok := that.(GPU) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Units.Equal(&that1.Units) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *GPU) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *GPU) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *GPU) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Units.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGpu(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGpu(dAtA []byte, offset int, v uint64) int { + offset -= sovGpu(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *GPU) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Units.Size() + n += 1 + l + sovGpu(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovGpu(uint64(l)) + } + } + return n +} + +func sovGpu(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func 
sozGpu(x uint64) (n int) { + return sovGpu(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *GPU) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: GPU: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: GPU: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Units", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Units.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGpu + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGpu + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGpu + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, 
v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGpu(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGpu + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGpu(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGpu + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGpu + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGpu + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGpu + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGpu = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGpu = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGpu 
= fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1/memory.pb.go b/go/node/types/resources/v1/memory.pb.go new file mode 100644 index 00000000..1185101a --- /dev/null +++ b/go/node/types/resources/v1/memory.pb.go @@ -0,0 +1,428 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1/memory.proto + +package v1 + +import ( + fmt "fmt" + github_com_akash_network_akash_api_go_node_types_attributes_v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Memory stores resource quantity and memory attributes +type Memory struct { + Quantity ResourceValue `protobuf:"bytes,1,opt,name=quantity,proto3" json:"size" yaml:"size"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,2,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *Memory) Reset() { *m = Memory{} } +func (m *Memory) String() string { return proto.CompactTextString(m) } +func (*Memory) ProtoMessage() {} +func (*Memory) Descriptor() ([]byte, []int) { + return fileDescriptor_3b893af85ee36ef5, []int{0} +} +func (m *Memory) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Memory) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Memory.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Memory) XXX_Merge(src proto.Message) { + xxx_messageInfo_Memory.Merge(m, src) +} +func (m *Memory) XXX_Size() int { + return m.Size() +} +func (m *Memory) XXX_DiscardUnknown() { + xxx_messageInfo_Memory.DiscardUnknown(m) +} + +var xxx_messageInfo_Memory proto.InternalMessageInfo + +func (m *Memory) GetQuantity() ResourceValue { + if m != nil { + return m.Quantity + } + return ResourceValue{} +} + +func (m *Memory) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*Memory)(nil), "akash.base.resources.v1.Memory") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1/memory.proto", fileDescriptor_3b893af85ee36ef5) +} + +var fileDescriptor_3b893af85ee36ef5 = []byte{ + // 
338 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0xd4, 0xcf, 0x4d, 0xcd, 0xcd, 0x2f, 0xaa, 0xd4, 0x2b, 0x28, 0xca, 0x2f, 0xc9, + 0x17, 0x12, 0x07, 0xab, 0xd2, 0x03, 0xa9, 0xd2, 0x83, 0xab, 0xd2, 0x2b, 0x33, 0x94, 0x12, 0x49, + 0xcf, 0x4f, 0xcf, 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0xa5, 0x34, 0x90, 0x0c, 0x4d, 0x2c, + 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x81, 0x98, 0x0a, 0xe7, 0x41, 0x55, 0x6a, 0xe3, 0xb2, 0x1e, + 0xc6, 0x29, 0x4b, 0xcc, 0x29, 0x85, 0x2a, 0x56, 0x3a, 0xc0, 0xc4, 0xc5, 0xe6, 0x0b, 0x76, 0x96, + 0x50, 0x3c, 0x17, 0x47, 0x61, 0x69, 0x62, 0x5e, 0x49, 0x66, 0x49, 0xa5, 0x04, 0xa3, 0x02, 0xa3, + 0x06, 0xb7, 0x91, 0x9a, 0x1e, 0x0e, 0x37, 0xea, 0x05, 0x41, 0x39, 0x61, 0x20, 0xa3, 0x9c, 0xa4, + 0x4f, 0xdc, 0x93, 0x67, 0x78, 0x75, 0x4f, 0x9e, 0xa5, 0x38, 0xb3, 0x2a, 0xf5, 0xd3, 0x3d, 0x79, + 0xee, 0xca, 0xc4, 0xdc, 0x1c, 0x2b, 0x25, 0x10, 0x4f, 0x29, 0x08, 0x6e, 0xa8, 0xd0, 0x39, 0x46, + 0x2e, 0x2e, 0x84, 0xd3, 0x25, 0x98, 0x14, 0x98, 0x35, 0xb8, 0x8d, 0x94, 0x91, 0xed, 0x40, 0xc8, + 0x82, 0x2c, 0x71, 0x84, 0xf1, 0x9c, 0x3a, 0x18, 0xa1, 0x36, 0x88, 0x20, 0x54, 0xe8, 0xe4, 0xe7, + 0x66, 0x96, 0xa4, 0xe6, 0x16, 0x94, 0x54, 0x7e, 0xba, 0x27, 0x2f, 0x0d, 0xb1, 0x11, 0x9b, 0xac, + 0xd2, 0xaa, 0xfb, 0xf2, 0x9e, 0xe9, 0x99, 0x25, 0x19, 0xa5, 0x49, 0x7a, 0xc9, 0xf9, 0xb9, 0xfa, + 0x60, 0xeb, 0x74, 0xf3, 0x52, 0x4b, 0xca, 0xf3, 0x8b, 0xb2, 0xa1, 0xbc, 0xc4, 0x82, 0x4c, 0xfd, + 0xf4, 0x7c, 0xfd, 0xbc, 0xfc, 0x94, 0x54, 0xfd, 0x92, 0xca, 0x82, 0xd4, 0x62, 0xd4, 0x20, 0x46, + 0xb8, 0xa4, 0x38, 0x08, 0xc9, 0x07, 0x56, 0x2c, 0x2f, 0x16, 0xc8, 0x33, 0x3a, 0x85, 0x9f, 0x78, + 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x5c, + 0x78, 0x2c, 0xc7, 0x70, 0xe3, 0xb1, 0x1c, 0x43, 0x94, 0x2d, 0xc9, 0xd6, 0x22, 0xc7, 0x57, 
0x12, + 0x1b, 0x38, 0x8a, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, 0x9c, 0xbc, 0x92, 0x7a, 0x50, 0x02, + 0x00, 0x00, +} + +func (this *Memory) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Memory) + if !ok { + that2, ok := that.(Memory) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Quantity.Equal(&that1.Quantity) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *Memory) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Memory) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Memory) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMemory(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintMemory(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintMemory(dAtA []byte, offset int, v uint64) int { + offset -= sovMemory(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Memory) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
m.Quantity.Size() + n += 1 + l + sovMemory(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovMemory(uint64(l)) + } + } + return n +} + +func sovMemory(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozMemory(x uint64) (n int) { + return sovMemory(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Memory) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMemory + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Memory: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Memory: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMemory + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMemory + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMemory + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMemory + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] 
+ iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMemory + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthMemory + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMemory(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthMemory + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMemory(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMemory + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMemory + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMemory + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthMemory + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupMemory + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", 
wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthMemory + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthMemory = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMemory = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupMemory = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1/migrate/v1beta3.go b/go/node/types/resources/v1/migrate/v1beta3.go new file mode 100644 index 00000000..bf14ea20 --- /dev/null +++ b/go/node/types/resources/v1/migrate/v1beta3.go @@ -0,0 +1,82 @@ +package migrate + +import ( + attr "github.com/akash-network/akash-api/go/node/types/attributes/v1/migrate" + "github.com/akash-network/akash-api/go/node/types/resources/v1" + "github.com/akash-network/akash-api/go/node/types/v1beta3" +) + +func ResourceValueFromV1Beta3(from v1beta3.ResourceValue) v1.ResourceValue { + return v1.NewResourceValue(from.Value()) +} + +func CPUFromV1Beta3(from *v1beta3.CPU) *v1.CPU { + if from == nil { + return nil + } + + return &v1.CPU{ + Units: ResourceValueFromV1Beta3(from.Units), + Attributes: attr.AttributesFromV1Beta3(from.Attributes), + } +} + +func GPUFromV1Beta3(from *v1beta3.GPU) *v1.GPU { + if from == nil { + return nil + } + + return &v1.GPU{ + Units: ResourceValueFromV1Beta3(from.Units), + Attributes: attr.AttributesFromV1Beta3(from.Attributes), + } +} + +func MemoryFromV1Beta3(from *v1beta3.Memory) *v1.Memory { + if from == nil { + return nil + } + + return &v1.Memory{ + Quantity: ResourceValueFromV1Beta3(from.Quantity), + Attributes: attr.AttributesFromV1Beta3(from.Attributes), + } +} + +func VolumesFromV1Beta3(from v1beta3.Volumes) v1.Volumes { + res := make(v1.Volumes, 0, len(from)) + + for _, storage := range from { + res = append(res, v1.Storage{ + Name: "default", + Quantity: ResourceValueFromV1Beta3(storage.Quantity), + Attributes: attr.AttributesFromV1Beta3(storage.Attributes), + }) + } + + 
return res +} + +func EndpointsFromV1Beta3(from []v1beta3.Endpoint) []v1.Endpoint { + res := make([]v1.Endpoint, 0, len(from)) + + for _, endpoint := range from { + res = append(res, v1.Endpoint{ + Kind: v1.Endpoint_Kind(endpoint.Kind), + SequenceNumber: endpoint.SequenceNumber, + }) + } + + return res +} + +func ResourcesFromV1Beta3(id uint32, from v1beta3.Resources) v1.Resources { + return v1.Resources{ + ID: id, + CPU: CPUFromV1Beta3(from.CPU), + GPU: GPUFromV1Beta3(from.GPU), + Memory: MemoryFromV1Beta3(from.Memory), + Storage: VolumesFromV1Beta3(from.Storage), + Endpoints: EndpointsFromV1Beta3(from.Endpoints), + } +} diff --git a/go/node/types/resources/v1/requirements.go b/go/node/types/resources/v1/requirements.go new file mode 100644 index 00000000..13b9b640 --- /dev/null +++ b/go/node/types/resources/v1/requirements.go @@ -0,0 +1,15 @@ +package v1 + +import ( + "gopkg.in/yaml.v3" +) + +func (m *SignedBy) String() string { + res, _ := yaml.Marshal(m) + return string(res) +} + +func (m *PlacementRequirements) String() string { + res, _ := yaml.Marshal(m) + return string(res) +} diff --git a/go/node/types/resources/v1/resources.go b/go/node/types/resources/v1/resources.go new file mode 100644 index 00000000..ad85ea47 --- /dev/null +++ b/go/node/types/resources/v1/resources.go @@ -0,0 +1,194 @@ +package v1 + +import ( + "fmt" +) + +type UnitType int + +type Unit interface { + String() string +} + +type ResUnit interface { + Equals(ResUnit) bool + Add(unit ResUnit) bool +} + +type Volumes []Storage + +var _ Unit = (*CPU)(nil) +var _ Unit = (*Memory)(nil) +var _ Unit = (*Storage)(nil) +var _ Unit = (*GPU)(nil) + +func (m Resources) Validate() error { + if m.ID == 0 { + return fmt.Errorf("resources ID must be > 0") + } + + if m.CPU == nil { + return fmt.Errorf("CPU must not be nil") + } + + if m.GPU == nil { + return fmt.Errorf("GPU must not be nil") + } + + if m.Memory == nil { + return fmt.Errorf("memory must not be nil") + } + + if m.Storage == nil { + return 
fmt.Errorf("storage must not be nil") + } + + if m.Endpoints == nil { + return fmt.Errorf("endpoints must not be nil") + } + + return nil +} + +func (m Resources) Dup() Resources { + res := Resources{ + ID: m.ID, + CPU: m.CPU.Dup(), + GPU: m.GPU.Dup(), + Memory: m.Memory.Dup(), + Storage: m.Storage.Dup(), + Endpoints: m.Endpoints.Dup(), + } + + return res +} + +func (m Resources) In(rhs Resources) bool { + if !m.CPU.Equal(rhs.CPU) || !m.GPU.Equal(rhs.GPU) || + !m.Memory.Equal(rhs.Memory) || !m.Storage.Equal(rhs.Storage) { + return false + } + +loop: + for _, ep := range m.Endpoints { + for _, rep := range rhs.Endpoints { + if ep.Equal(rep) { + continue loop + } + } + + return false + } + + return true +} + +func (m CPU) Dup() *CPU { + return &CPU{ + Units: m.Units.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m Memory) Dup() *Memory { + return &Memory{ + Quantity: m.Quantity.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m Storage) Dup() *Storage { + return &Storage{ + Name: m.Name, + Quantity: m.Quantity.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m GPU) Dup() *GPU { + return &GPU{ + Units: m.Units.Dup(), + Attributes: m.Attributes.Dup(), + } +} + +func (m Volumes) Equal(rhs Volumes) bool { + for i := range m { + if !m[i].Equal(rhs[i]) { + return false + } + } + + return true +} + +func (m Volumes) Dup() Volumes { + res := make(Volumes, 0, len(m)) + + for _, storage := range m { + res = append(res, *storage.Dup()) + } + + return res +} + +func (m *CPU) EqualUnits(that *CPU) bool { + if that == nil { + return m == nil + } else if m == nil { + return false + } + + if !m.Units.Equal(&that.Units) { + return false + } + + return true +} + +func (m *GPU) EqualUnits(that *GPU) bool { + if that == nil { + return m == nil + } else if m == nil { + return false + } + + if !m.Units.Equal(&that.Units) { + return false + } + + return true +} + +func (m *Memory) EqualUnits(that *Memory) bool { + if that == nil { + return m == nil + } else if m 
== nil { + return false + } + + if !m.Quantity.Equal(&that.Quantity) { + return false + } + + return true +} + +func (m Volumes) EqualUnits(that Volumes) bool { + if len(m) != len(that) { + return false + } + + for idx, vol := range m { + if vol.Name != that[idx].Name { + return false + } + + if !vol.Quantity.Equal(&that[idx].Quantity) { + return false + } + + } + + return true +} diff --git a/go/node/types/resources/v1/resources.pb.go b/go/node/types/resources/v1/resources.pb.go new file mode 100644 index 00000000..e1c999fe --- /dev/null +++ b/go/node/types/resources/v1/resources.pb.go @@ -0,0 +1,677 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1/resources.proto + +package v1 + +import ( + fmt "fmt" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Resources describes all available resources types for deployment/node etc +// if field is nil resource is not present in the given data-structure +type Resources struct { + ID uint32 `protobuf:"varint,1,opt,name=id,proto3" json:"id" yaml:"id"` + CPU *CPU `protobuf:"bytes,2,opt,name=cpu,proto3" json:"cpu,omitempty" yaml:"cpu,omitempty"` + Memory *Memory `protobuf:"bytes,3,opt,name=memory,proto3" json:"memory,omitempty" yaml:"memory,omitempty"` + Storage Volumes `protobuf:"bytes,4,rep,name=storage,proto3,castrepeated=Volumes" json:"storage,omitempty" yaml:"storage,omitempty"` + GPU *GPU `protobuf:"bytes,5,opt,name=gpu,proto3" json:"gpu,omitempty" yaml:"gpu,omitempty"` + Endpoints Endpoints `protobuf:"bytes,6,rep,name=endpoints,proto3,castrepeated=Endpoints" json:"endpoints,omitempty" yaml:"endpoints,omitempty"` +} + +func (m *Resources) Reset() { *m = Resources{} } +func (m *Resources) String() string { return proto.CompactTextString(m) } +func (*Resources) ProtoMessage() {} +func (*Resources) Descriptor() ([]byte, []int) { + return fileDescriptor_e333f38b73a40dee, []int{0} +} +func (m *Resources) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Resources) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Resources.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Resources) XXX_Merge(src proto.Message) { + xxx_messageInfo_Resources.Merge(m, src) +} +func (m *Resources) XXX_Size() int { + return m.Size() +} +func (m *Resources) XXX_DiscardUnknown() { + xxx_messageInfo_Resources.DiscardUnknown(m) +} + +var xxx_messageInfo_Resources proto.InternalMessageInfo + +func (m *Resources) GetID() uint32 { + if m != nil { + return m.ID + } + return 0 +} + +func (m *Resources) GetCPU() *CPU { + if m 
!= nil { + return m.CPU + } + return nil +} + +func (m *Resources) GetMemory() *Memory { + if m != nil { + return m.Memory + } + return nil +} + +func (m *Resources) GetStorage() Volumes { + if m != nil { + return m.Storage + } + return nil +} + +func (m *Resources) GetGPU() *GPU { + if m != nil { + return m.GPU + } + return nil +} + +func (m *Resources) GetEndpoints() Endpoints { + if m != nil { + return m.Endpoints + } + return nil +} + +func init() { + proto.RegisterType((*Resources)(nil), "akash.base.resources.v1.Resources") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1/resources.proto", fileDescriptor_e333f38b73a40dee) +} + +var fileDescriptor_e333f38b73a40dee = []byte{ + // 485 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x92, 0xcf, 0x6b, 0xd4, 0x40, + 0x14, 0xc7, 0x77, 0xb2, 0xeb, 0x96, 0x4d, 0x29, 0x68, 0x2c, 0x34, 0x2e, 0x92, 0xd9, 0x06, 0x7f, + 0xac, 0xa0, 0x09, 0x6d, 0x3d, 0x15, 0x04, 0x49, 0xd5, 0xe2, 0x41, 0x28, 0x91, 0x2a, 0x78, 0xcb, + 0x26, 0xc3, 0x34, 0x74, 0xb3, 0x33, 0x64, 0x92, 0xca, 0x82, 0x77, 0xaf, 0xfe, 0x09, 0x9e, 0xfd, + 0x4b, 0xf6, 0xd8, 0xa3, 0xa7, 0x51, 0xb2, 0x17, 0xd9, 0x63, 0xff, 0x00, 0x91, 0x64, 0x26, 0x5d, + 0xd3, 0x76, 0xd8, 0xdb, 0xce, 0xbe, 0xcf, 0x7b, 0x9f, 0xbc, 0x2f, 0x4f, 0x7f, 0x1c, 0x9c, 0x06, + 0xec, 0xc4, 0x1d, 0x05, 0x0c, 0xb9, 0x29, 0x62, 0x24, 0x4f, 0x43, 0xc4, 0xdc, 0xb3, 0x9d, 0xe5, + 0xc3, 0xa1, 0x29, 0xc9, 0x88, 0xb1, 0x55, 0x81, 0x4e, 0x09, 0x3a, 0xcb, 0xda, 0xd9, 0x4e, 0x7f, + 0x13, 0x13, 0x4c, 0x2a, 0xc6, 0x2d, 0x7f, 0x09, 0xbc, 0xbf, 0xad, 0x9a, 0x1b, 0xd2, 0x7c, 0x15, + 0x82, 0x2f, 0x91, 0x07, 0x2a, 0x24, 0x41, 0x09, 0x49, 0xa7, 0x92, 0x7a, 0xa8, 0xa2, 0x58, 0x46, + 0xd2, 0x00, 0x23, 0x89, 0x3d, 0x52, 0x61, 0x68, 0x12, 0x51, 0x12, 0x4f, 0x32, 0xc1, 0xd9, 0x7f, + 0x3b, 0x7a, 0xcf, 0xaf, 0xeb, 0xc6, 0x13, 0x5d, 0x8b, 0x23, 0x13, 0x0c, 0xc0, 0x70, 0xc3, 0xbb, + 0x57, 0x70, 0xa8, 0xbd, 0x7d, 0xb5, 0xe0, 0x50, 0x8b, 0xa3, 0x0b, 
0x0e, 0x7b, 0xd3, 0x20, 0x19, + 0xef, 0xdb, 0x71, 0x64, 0xfb, 0x5a, 0x1c, 0x19, 0x91, 0xde, 0x0e, 0x69, 0x6e, 0x6a, 0x03, 0x30, + 0x5c, 0xdf, 0xbd, 0xef, 0x28, 0x02, 0x73, 0x0e, 0x8e, 0x8e, 0xbd, 0xe7, 0x33, 0x0e, 0x41, 0xc1, + 0x61, 0xfb, 0xe0, 0xe8, 0x78, 0xc1, 0xe1, 0x46, 0x48, 0xf3, 0xa7, 0x24, 0x89, 0x33, 0x94, 0xd0, + 0x6c, 0x7a, 0xc1, 0xe1, 0xa6, 0x98, 0xdc, 0xf8, 0xdb, 0xf6, 0xcb, 0xf1, 0xc6, 0x58, 0xef, 0x8a, + 0xed, 0xcd, 0x76, 0x25, 0x82, 0x4a, 0xd1, 0xbb, 0x0a, 0xf3, 0xf6, 0x4a, 0xd7, 0x82, 0xc3, 0xdb, + 0xa2, 0xad, 0xe1, 0xd9, 0x12, 0x9e, 0xab, 0x15, 0xdb, 0x97, 0x0e, 0xe3, 0x8b, 0xbe, 0x26, 0x53, + 0x34, 0x3b, 0x83, 0xf6, 0x70, 0x7d, 0x77, 0xa0, 0xd4, 0xbd, 0x17, 0x9c, 0xf7, 0x72, 0xc6, 0x61, + 0x6b, 0xc1, 0xe1, 0x1d, 0xd9, 0xd8, 0x10, 0x9a, 0x42, 0x78, 0xad, 0x64, 0xff, 0xf8, 0x05, 0xd7, + 0x3e, 0x90, 0x71, 0x9e, 0x20, 0xe6, 0xd7, 0xca, 0x32, 0x51, 0x4c, 0x73, 0xf3, 0xd6, 0x8a, 0x44, + 0x0f, 0xff, 0x4f, 0xf4, 0x50, 0x24, 0x8a, 0x6f, 0x4e, 0x14, 0x5f, 0x49, 0x14, 0xd3, 0xdc, 0xf8, + 0x0a, 0xf4, 0x5e, 0x7d, 0x03, 0xcc, 0xec, 0x56, 0x6b, 0x6e, 0x2b, 0x65, 0xaf, 0x25, 0xe9, 0xbd, + 0x91, 0x7b, 0xde, 0xbd, 0xec, 0x6d, 0x08, 0xfb, 0x42, 0x78, 0x43, 0xb1, 0xdc, 0xb5, 0x57, 0x8f, + 0x61, 0xfe, 0xd2, 0xbd, 0xdf, 0xf9, 0xf3, 0x1d, 0x02, 0xef, 0xe3, 0xac, 0xb0, 0xc0, 0x79, 0x61, + 0x81, 0xdf, 0x85, 0x05, 0xbe, 0xcd, 0xad, 0xd6, 0xf9, 0xdc, 0x6a, 0xfd, 0x9c, 0x5b, 0xad, 0x4f, + 0x2f, 0x70, 0x9c, 0x9d, 0xe4, 0x23, 0x27, 0x24, 0x89, 0x5b, 0x7d, 0xdf, 0xb3, 0x09, 0xca, 0x3e, + 0x93, 0xf4, 0x54, 0xbe, 0x02, 0x1a, 0xbb, 0x98, 0xb8, 0x13, 0x12, 0x21, 0x37, 0x9b, 0x52, 0xc4, + 0x1a, 0x87, 0x3e, 0xea, 0x56, 0x07, 0xbe, 0xf7, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xeb, 0x1d, 0x2c, + 0x21, 0xf5, 0x03, 0x00, 0x00, +} + +func (this *Resources) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Resources) + if !ok { + that2, ok := that.(Resources) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return 
this == nil + } else if this == nil { + return false + } + if this.ID != that1.ID { + return false + } + if !this.CPU.Equal(that1.CPU) { + return false + } + if !this.Memory.Equal(that1.Memory) { + return false + } + if len(this.Storage) != len(that1.Storage) { + return false + } + for i := range this.Storage { + if !this.Storage[i].Equal(&that1.Storage[i]) { + return false + } + } + if !this.GPU.Equal(that1.GPU) { + return false + } + if len(this.Endpoints) != len(that1.Endpoints) { + return false + } + for i := range this.Endpoints { + if !this.Endpoints[i].Equal(&that1.Endpoints[i]) { + return false + } + } + return true +} +func (m *Resources) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Resources) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Resources) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Endpoints) > 0 { + for iNdEx := len(m.Endpoints) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Endpoints[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } + if m.GPU != nil { + { + size, err := m.GPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + if len(m.Storage) > 0 { + for iNdEx := len(m.Storage) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Storage[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + if m.Memory != nil { + { + size, err := m.Memory.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, 
err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + if m.CPU != nil { + { + size, err := m.CPU.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintResources(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.ID != 0 { + i = encodeVarintResources(dAtA, i, uint64(m.ID)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func encodeVarintResources(dAtA []byte, offset int, v uint64) int { + offset -= sovResources(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Resources) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.ID != 0 { + n += 1 + sovResources(uint64(m.ID)) + } + if m.CPU != nil { + l = m.CPU.Size() + n += 1 + l + sovResources(uint64(l)) + } + if m.Memory != nil { + l = m.Memory.Size() + n += 1 + l + sovResources(uint64(l)) + } + if len(m.Storage) > 0 { + for _, e := range m.Storage { + l = e.Size() + n += 1 + l + sovResources(uint64(l)) + } + } + if m.GPU != nil { + l = m.GPU.Size() + n += 1 + l + sovResources(uint64(l)) + } + if len(m.Endpoints) > 0 { + for _, e := range m.Endpoints { + l = e.Size() + n += 1 + l + sovResources(uint64(l)) + } + } + return n +} + +func sovResources(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozResources(x uint64) (n int) { + return sovResources(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Resources) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return 
fmt.Errorf("proto: Resources: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Resources: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + m.ID = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.ID |= uint32(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.CPU == nil { + m.CPU = &CPU{} + } + if err := m.CPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Memory", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Memory == nil { + m.Memory = &Memory{} + } + if err := m.Memory.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + 
case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Storage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Storage = append(m.Storage, Storage{}) + if err := m.Storage[len(m.Storage)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field GPU", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.GPU == nil { + m.GPU = &GPU{} + } + if err := m.GPU.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Endpoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResources + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthResources + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthResources + } + if postIndex > l { + return 
io.ErrUnexpectedEOF + } + m.Endpoints = append(m.Endpoints, Endpoint{}) + if err := m.Endpoints[len(m.Endpoints)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResources(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResources + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResources(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResources + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResources + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResources + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthResources + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupResources + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthResources + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthResources = fmt.Errorf("proto: negative length 
found during unmarshaling") + ErrIntOverflowResources = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupResources = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1/resources_test.go b/go/node/types/resources/v1/resources_test.go new file mode 100644 index 00000000..e52c1422 --- /dev/null +++ b/go/node/types/resources/v1/resources_test.go @@ -0,0 +1,143 @@ +package v1 + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestVolumes_Dup(t *testing.T) { + volumes := Volumes{ + Storage{ + Name: "default", + Quantity: NewResourceValue(100), + Attributes: Attributes{}, + }, + } + + dVolumes := volumes.Dup() + + require.Equal(t, volumes, dVolumes) +} + +func TestVolumes_Equal(t *testing.T) { + volumes := Volumes{ + Storage{ + Name: "default", + Quantity: NewResourceValue(100), + Attributes: Attributes{}, + }, + } + + dVolumes := volumes.Dup() + + require.True(t, volumes.Equal(dVolumes)) + + dVolumes[0].Name = "class2" + require.False(t, volumes.Equal(dVolumes)) +} + +func TestResources_ValidateID(t *testing.T) { + res := Resources{} + + err := res.Validate() + require.ErrorContains(t, err, "resources ID must be > 0") +} + +func TestResources_ValidateCPU(t *testing.T) { + res := Resources{ + ID: 1, + } + + err := res.Validate() + require.ErrorContains(t, err, "CPU must not be nil") +} + +func TestResources_ValidateGPU(t *testing.T) { + res := Resources{ + ID: 1, + CPU: &CPU{}, + } + + err := res.Validate() + require.ErrorContains(t, err, "GPU must not be nil") +} + +func TestResources_ValidateMemory(t *testing.T) { + res := Resources{ + ID: 1, + CPU: &CPU{}, + GPU: &GPU{}, + } + + err := res.Validate() + require.ErrorContains(t, err, "memory must not be nil") +} + +func TestResources_ValidateStorage(t *testing.T) { + res := Resources{ + ID: 1, + CPU: &CPU{}, + GPU: &GPU{}, + Memory: &Memory{}, + } + + err := res.Validate() + require.ErrorContains(t, err, "storage must not be nil") +} + +func 
TestResources_ValidateEndpoints(t *testing.T) { + res := Resources{ + ID: 1, + CPU: &CPU{}, + GPU: &GPU{}, + Memory: &Memory{}, + Storage: make(Volumes, 0), + } + + err := res.Validate() + require.ErrorContains(t, err, "endpoints must not be nil") +} + +func TestResources_Validate(t *testing.T) { + res := Resources{ + ID: 1, + CPU: &CPU{}, + GPU: &GPU{}, + Memory: &Memory{}, + Storage: make(Volumes, 0), + Endpoints: make(Endpoints, 0), + } + + err := res.Validate() + require.NoError(t, err) +} + +func TestResources_DupInvalidID(t *testing.T) { + res := Resources{ + CPU: &CPU{}, + GPU: &GPU{}, + Memory: &Memory{}, + Storage: make(Volumes, 0), + Endpoints: make(Endpoints, 0), + } + + dup := res.Dup() + err := dup.Validate() + require.ErrorContains(t, err, "resources ID must be > 0") +} + +func TestResources_DupValid(t *testing.T) { + res := Resources{ + ID: 1, + CPU: &CPU{}, + GPU: &GPU{}, + Memory: &Memory{}, + Storage: make(Volumes, 0), + Endpoints: make(Endpoints, 0), + } + + dup := res.Dup() + err := dup.Validate() + require.NoError(t, err) +} diff --git a/go/node/types/resources/v1/resourcevalue.go b/go/node/types/resources/v1/resourcevalue.go new file mode 100644 index 00000000..8595e39d --- /dev/null +++ b/go/node/types/resources/v1/resourcevalue.go @@ -0,0 +1,57 @@ +package v1 + +import ( + sdk "github.com/cosmos/cosmos-sdk/types" + "github.com/pkg/errors" +) + +var ( + ErrOverflow = errors.Errorf("resource value overflow") + ErrCannotSub = errors.Errorf("cannot subtract resources when lhs does not have same units as rhs") + ErrNegativeResult = errors.Errorf("result of subtraction is negative") +) + +/* +ResourceValue the big point of this small change is to ensure math operations on resources +not resulting with negative value which panic on unsigned types as well as overflow which leads to panic too +instead reasonable error is returned. 
+Each resource using this type as value can take extra advantage of it to check upper bounds +For example in SDL v1 CPU units were handled as uint32 and operation like math.MaxUint32 + 2 +would cause application to panic. But nowadays + const CPULimit = math.MaxUint32 + + func (c *CPU) add(rhs CPU) error { + res, err := c.Units.add(rhs.Units) + if err != nil { + return err + } + + if res.Units.Value() > CPULimit { + return ErrOverflow + } + + c.Units = res + + return nil + } +*/ + +func NewResourceValue(val uint64) ResourceValue { + res := ResourceValue{ + Val: sdk.NewIntFromUint64(val), + } + + return res +} + +func (m ResourceValue) Value() uint64 { + return m.Val.Uint64() +} + +func (m ResourceValue) Dup() ResourceValue { + res := ResourceValue{ + Val: sdk.NewIntFromBigInt(m.Val.BigInt()), + } + + return res +} diff --git a/go/node/types/resources/v1/resourcevalue.pb.go b/go/node/types/resources/v1/resourcevalue.pb.go new file mode 100644 index 00000000..ec257e34 --- /dev/null +++ b/go/node/types/resources/v1/resourcevalue.pb.go @@ -0,0 +1,343 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1/resourcevalue.proto + +package v1 + +import ( + fmt "fmt" + github_com_cosmos_cosmos_sdk_types "github.com/cosmos/cosmos-sdk/types" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Unit stores cpu, memory and storage metrics +type ResourceValue struct { + Val github_com_cosmos_cosmos_sdk_types.Int `protobuf:"bytes,1,opt,name=val,proto3,customtype=github.com/cosmos/cosmos-sdk/types.Int" json:"val"` +} + +func (m *ResourceValue) Reset() { *m = ResourceValue{} } +func (m *ResourceValue) String() string { return proto.CompactTextString(m) } +func (*ResourceValue) ProtoMessage() {} +func (*ResourceValue) Descriptor() ([]byte, []int) { + return fileDescriptor_3f765cba700bf328, []int{0} +} +func (m *ResourceValue) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ResourceValue) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ResourceValue.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ResourceValue) XXX_Merge(src proto.Message) { + xxx_messageInfo_ResourceValue.Merge(m, src) +} +func (m *ResourceValue) XXX_Size() int { + return m.Size() +} +func (m *ResourceValue) XXX_DiscardUnknown() { + xxx_messageInfo_ResourceValue.DiscardUnknown(m) +} + +var xxx_messageInfo_ResourceValue proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ResourceValue)(nil), "akash.base.resources.v1.ResourceValue") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1/resourcevalue.proto", fileDescriptor_3f765cba700bf328) +} + +var fileDescriptor_3f765cba700bf328 = []byte{ + // 228 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xd2, 0x4e, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0x84, 0x73, 0xca, 0x12, 0x73, 0x4a, 0x53, 0xf5, 0x0a, 0x8a, 0xf2, 0x4b, 0xf2, + 0x85, 0xc4, 0xc1, 0x8a, 0xf5, 0x40, 0x8a, 0xf5, 0xe0, 0x8a, 
0xf5, 0xca, 0x0c, 0xa5, 0x44, 0xd2, + 0xf3, 0xd3, 0xf3, 0xc1, 0x6a, 0xf4, 0x41, 0x2c, 0x88, 0x72, 0xa5, 0x70, 0x2e, 0xde, 0x20, 0xa8, + 0xaa, 0x30, 0x90, 0x29, 0x42, 0x0e, 0x5c, 0xcc, 0x65, 0x89, 0x39, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, + 0x3c, 0x4e, 0x7a, 0x27, 0xee, 0xc9, 0x33, 0xdc, 0xba, 0x27, 0xaf, 0x96, 0x9e, 0x59, 0x92, 0x51, + 0x9a, 0xa4, 0x97, 0x9c, 0x9f, 0xab, 0x9f, 0x9c, 0x5f, 0x9c, 0x9b, 0x5f, 0x0c, 0xa5, 0x74, 0x8b, + 0x53, 0xb2, 0xf5, 0x4b, 0x2a, 0x0b, 0x52, 0x8b, 0xf5, 0x3c, 0xf3, 0x4a, 0x82, 0x40, 0x5a, 0xad, + 0x58, 0x5e, 0x2c, 0x90, 0x67, 0x74, 0x0a, 0x3f, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, + 0x07, 0x8f, 0xe4, 0x18, 0x27, 0x3c, 0x96, 0x63, 0xb8, 0xf0, 0x58, 0x8e, 0xe1, 0xc6, 0x63, 0x39, + 0x86, 0x28, 0x5b, 0x24, 0xc3, 0xc0, 0x8e, 0xd5, 0xcd, 0x4b, 0x2d, 0x29, 0xcf, 0x2f, 0xca, 0x86, + 0xf2, 0x12, 0x0b, 0x32, 0xf5, 0xd3, 0xf3, 0xf5, 0xf3, 0xf2, 0x53, 0x52, 0x21, 0x46, 0xa3, 0x78, + 0x3a, 0x89, 0x0d, 0xec, 0x70, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x35, 0x18, 0xc0, 0x86, + 0x16, 0x01, 0x00, 0x00, +} + +func (this *ResourceValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ResourceValue) + if !ok { + that2, ok := that.(ResourceValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.Val.Equal(that1.Val) { + return false + } + return true +} +func (m *ResourceValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ResourceValue) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ResourceValue) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + { + size := m.Val.Size() + i -= size + if _, err := 
m.Val.MarshalTo(dAtA[i:]); err != nil { + return 0, err + } + i = encodeVarintResourcevalue(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintResourcevalue(dAtA []byte, offset int, v uint64) int { + offset -= sovResourcevalue(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ResourceValue) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.Val.Size() + n += 1 + l + sovResourcevalue(uint64(l)) + return n +} + +func sovResourcevalue(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozResourcevalue(x uint64) (n int) { + return sovResourcevalue(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ResourceValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ResourceValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ResourceValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Val", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthResourcevalue + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return 
ErrInvalidLengthResourcevalue + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Val.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipResourcevalue(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthResourcevalue + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipResourcevalue(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowResourcevalue + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthResourcevalue + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupResourcevalue + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthResourcevalue + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthResourcevalue = 
fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowResourcevalue = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupResourcevalue = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/resources/v1/storage.pb.go b/go/node/types/resources/v1/storage.pb.go new file mode 100644 index 00000000..496afd03 --- /dev/null +++ b/go/node/types/resources/v1/storage.pb.go @@ -0,0 +1,483 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: akash/base/resources/v1/storage.proto + +package v1 + +import ( + fmt "fmt" + github_com_akash_network_akash_api_go_node_types_attributes_v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + v1 "github.com/akash-network/akash-api/go/node/types/attributes/v1" + _ "github.com/gogo/protobuf/gogoproto" + proto "github.com/gogo/protobuf/proto" + io "io" + math "math" + math_bits "math/bits" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +// Storage stores resource quantity and storage attributes +type Storage struct { + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name" yaml:"name"` + Quantity ResourceValue `protobuf:"bytes,2,opt,name=quantity,proto3" json:"size" yaml:"size"` + Attributes github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes `protobuf:"bytes,3,rep,name=attributes,proto3,castrepeated=github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes" json:"attributes,omitempty" yaml:"attributes,omitempty"` +} + +func (m *Storage) Reset() { *m = Storage{} } +func (m *Storage) String() string { return proto.CompactTextString(m) } +func (*Storage) ProtoMessage() {} +func (*Storage) Descriptor() ([]byte, []int) { + return fileDescriptor_3700e07ed3ea303d, []int{0} +} +func (m *Storage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Storage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Storage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *Storage) XXX_Merge(src proto.Message) { + xxx_messageInfo_Storage.Merge(m, src) +} +func (m *Storage) XXX_Size() int { + return m.Size() +} +func (m *Storage) XXX_DiscardUnknown() { + xxx_messageInfo_Storage.DiscardUnknown(m) +} + +var xxx_messageInfo_Storage proto.InternalMessageInfo + +func (m *Storage) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Storage) GetQuantity() ResourceValue { + if m != nil { + return m.Quantity + } + return ResourceValue{} +} + +func (m *Storage) GetAttributes() github_com_akash_network_akash_api_go_node_types_attributes_v1.Attributes { + if m != nil { + return m.Attributes + } + return nil +} + +func init() { + proto.RegisterType((*Storage)(nil), 
"akash.base.resources.v1.Storage") +} + +func init() { + proto.RegisterFile("akash/base/resources/v1/storage.proto", fileDescriptor_3700e07ed3ea303d) +} + +var fileDescriptor_3700e07ed3ea303d = []byte{ + // 365 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x4d, 0xcc, 0x4e, 0x2c, + 0xce, 0xd0, 0x4f, 0x4a, 0x2c, 0x4e, 0xd5, 0x2f, 0x4a, 0x2d, 0xce, 0x2f, 0x2d, 0x4a, 0x4e, 0x2d, + 0xd6, 0x2f, 0x33, 0xd4, 0x2f, 0x2e, 0xc9, 0x2f, 0x4a, 0x4c, 0x4f, 0xd5, 0x2b, 0x28, 0xca, 0x2f, + 0xc9, 0x17, 0x12, 0x07, 0x2b, 0xd3, 0x03, 0x29, 0xd3, 0x83, 0x2b, 0xd3, 0x2b, 0x33, 0x94, 0x12, + 0x49, 0xcf, 0x4f, 0xcf, 0x07, 0xab, 0xd1, 0x07, 0xb1, 0x20, 0xca, 0xa5, 0x34, 0x90, 0x4c, 0x4d, + 0x2c, 0x29, 0x29, 0xca, 0x4c, 0x2a, 0x2d, 0x81, 0x18, 0x0b, 0xe7, 0x41, 0x55, 0x6a, 0xe3, 0xb2, + 0x1f, 0xc6, 0x29, 0x4b, 0xcc, 0x29, 0x85, 0x2a, 0x56, 0x7a, 0xc7, 0xc4, 0xc5, 0x1e, 0x0c, 0x71, + 0x97, 0x90, 0x36, 0x17, 0x4b, 0x5e, 0x62, 0x6e, 0xaa, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0xa7, 0x93, + 0xf8, 0xab, 0x7b, 0xf2, 0x60, 0xfe, 0xa7, 0x7b, 0xf2, 0xdc, 0x95, 0x89, 0xb9, 0x39, 0x56, 0x4a, + 0x20, 0x9e, 0x52, 0x10, 0x58, 0x50, 0x28, 0x9e, 0x8b, 0xa3, 0xb0, 0x34, 0x31, 0xaf, 0x24, 0xb3, + 0xa4, 0x52, 0x82, 0x49, 0x81, 0x51, 0x83, 0xdb, 0x48, 0x4d, 0x0f, 0x87, 0x8f, 0xf4, 0x82, 0xa0, + 0x9c, 0x30, 0x90, 0xc5, 0x4e, 0xd2, 0x27, 0xee, 0xc9, 0x33, 0x80, 0x0c, 0x2f, 0xce, 0xac, 0x42, + 0x32, 0x1c, 0xc4, 0x53, 0x0a, 0x82, 0x1b, 0x2a, 0x74, 0x8e, 0x91, 0x8b, 0x0b, 0xe1, 0x51, 0x09, + 0x66, 0x05, 0x66, 0x0d, 0x6e, 0x23, 0x65, 0x64, 0x3b, 0x10, 0xb2, 0x20, 0x4b, 0x1c, 0x61, 0x3c, + 0xa7, 0x0e, 0x46, 0xa8, 0x0d, 0x22, 0x08, 0x15, 0x3a, 0xf9, 0xb9, 0x99, 0x25, 0xa9, 0xb9, 0x05, + 0x25, 0x95, 0x9f, 0xee, 0xc9, 0x4b, 0x43, 0x6c, 0xc4, 0x26, 0xab, 0xb4, 0xea, 0xbe, 0xbc, 0x67, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x3e, 0xd8, 0x3a, 0xdd, 0xbc, 0xd4, + 0x92, 0xf2, 0xfc, 0xa2, 0x6c, 0x28, 0x2f, 0xb1, 0x20, 0x53, 0x3f, 0x3d, 0x5f, 
0x3f, 0x2f, 0x3f, + 0x25, 0x55, 0xbf, 0xa4, 0xb2, 0x20, 0xb5, 0x18, 0x35, 0x42, 0x10, 0x2e, 0x29, 0x0e, 0x42, 0xf2, + 0x81, 0x15, 0xcb, 0x8b, 0x05, 0xf2, 0x8c, 0x4e, 0xe1, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, + 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, 0xc7, 0x72, 0x0c, 0x17, 0x1e, 0xcb, 0x31, 0xdc, 0x78, + 0x2c, 0xc7, 0x10, 0x65, 0x4b, 0xb2, 0xb5, 0xc8, 0xb1, 0x9b, 0xc4, 0x06, 0x8e, 0x50, 0x63, 0x40, + 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x38, 0xde, 0x87, 0x7f, 0x02, 0x00, 0x00, +} + +func (this *Storage) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Storage) + if !ok { + that2, ok := that.(Storage) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Quantity.Equal(&that1.Quantity) { + return false + } + if len(this.Attributes) != len(that1.Attributes) { + return false + } + for i := range this.Attributes { + if !this.Attributes[i].Equal(&that1.Attributes[i]) { + return false + } + } + return true +} +func (m *Storage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Storage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Storage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Attributes) > 0 { + for iNdEx := len(m.Attributes) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Attributes[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintStorage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + } + } + { + size, err := m.Quantity.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintStorage(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintStorage(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func encodeVarintStorage(dAtA []byte, offset int, v uint64) int { + offset -= sovStorage(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *Storage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovStorage(uint64(l)) + } + l = m.Quantity.Size() + n += 1 + l + sovStorage(uint64(l)) + if len(m.Attributes) > 0 { + for _, e := range m.Attributes { + l = e.Size() + n += 1 + l + sovStorage(uint64(l)) + } + } + return n +} + +func sovStorage(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozStorage(x uint64) (n int) { + return sovStorage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Storage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Storage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Storage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Quantity", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Quantity.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Attributes", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthStorage + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Attributes = append(m.Attributes, v1.Attribute{}) + if err := m.Attributes[len(m.Attributes)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + 
skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStorage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthStorage + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupStorage + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthStorage + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthStorage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStorage = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupStorage = fmt.Errorf("proto: unexpected end of group") +) diff --git a/go/node/types/v1beta3/attribute.go b/go/node/types/v1beta3/attribute.go index a2bb42b6..fa9038ac 100644 --- a/go/node/types/v1beta3/attribute.go +++ b/go/node/types/v1beta3/attribute.go @@ -1,6 +1,7 @@ package v1beta3 import ( + 
"errors" "path/filepath" "reflect" "regexp" @@ -8,7 +9,6 @@ import ( "strconv" "strings" - sdkerrors "github.com/cosmos/cosmos-sdk/types/errors" "gopkg.in/yaml.v3" ) @@ -18,14 +18,9 @@ const ( AttributeNameRegexpString = `^([a-zA-Z][\w\/\.\-]{1,126})$` ) -const ( - errAttributesDuplicateKeys uint32 = iota + 1 - errInvalidAttributeKey -) - var ( - ErrAttributesDuplicateKeys = sdkerrors.Register(moduleName, errAttributesDuplicateKeys, "attributes cannot have duplicate keys") - ErrInvalidAttributeKey = sdkerrors.Register(moduleName, errInvalidAttributeKey, "attribute key does not match regexp") + ErrAttributesDuplicateKeys = errors.New("attributes cannot have duplicate keys") + ErrInvalidAttributeKey = errors.New("attribute key does not match regexp") ) var ( diff --git a/proto/node/akash/audit/v1beta4/audit.proto b/proto/node/akash/audit/v1beta4/audit.proto new file mode 100644 index 00000000..ca4daa40 --- /dev/null +++ b/proto/node/akash/audit/v1beta4/audit.proto @@ -0,0 +1,124 @@ +syntax = "proto3"; +package akash.audit.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta4"; + +// Msg defines the provider Msg service +service Msg { + // SignProviderAttributes defines a method that signs provider attributes + rpc SignProviderAttributes(MsgSignProviderAttributes) returns (MsgSignProviderAttributesResponse); + + // DeleteProviderAttributes defines a method that deletes provider attributes + rpc DeleteProviderAttributes(MsgDeleteProviderAttributes) returns (MsgDeleteProviderAttributesResponse); +} + +// Provider stores owner auditor and attributes details +message Provider { + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 4 [ + 
(gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} + +// Attributes +message AuditedAttributes { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = true; + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} + +// AttributesResponse represents details of deployment along with group details +message AttributesResponse { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = true; + + repeated AuditedAttributes attributes = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} + +// AttributesFilters defines filters used to filter deployments +message AttributesFilters { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = true; + + repeated string auditors = 1 [ + (gogoproto.jsontag) = "auditors", + (gogoproto.moretags) = "yaml:\"auditors\"" + ]; + repeated string owners = 2 [ + (gogoproto.jsontag) = "owners", + (gogoproto.moretags) = "yaml:\"owners\"" + ]; +} + +// MsgSignProviderAttributes defines an SDK message for signing a provider attributes +message MsgSignProviderAttributes { + option (gogoproto.equal) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = 
"yaml:\"auditor\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} + +// MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. +message MsgSignProviderAttributesResponse {} + +// MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes +message MsgDeleteProviderAttributes { + option (gogoproto.equal) = false; + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + string auditor = 2 [ + (gogoproto.jsontag) = "auditor", + (gogoproto.moretags) = "yaml:\"auditor\"" + ]; + repeated string keys = 3 [ + (gogoproto.jsontag) = "keys", + (gogoproto.moretags) = "yaml:\"keys\"" + ]; +} + +// MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. 
+message MsgDeleteProviderAttributesResponse {} diff --git a/proto/node/akash/audit/v1beta4/genesis.proto b/proto/node/akash/audit/v1beta4/genesis.proto new file mode 100644 index 00000000..2babda9d --- /dev/null +++ b/proto/node/akash/audit/v1beta4/genesis.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; +package akash.audit.v1beta4; + +import "gogoproto/gogo.proto"; + +import "akash/audit/v1beta4/audit.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta4"; + +// GenesisState defines the basic genesis state used by audit module +message GenesisState { + repeated AuditedAttributes attributes = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} diff --git a/proto/node/akash/audit/v1beta4/query.proto b/proto/node/akash/audit/v1beta4/query.proto new file mode 100644 index 00000000..a13baae0 --- /dev/null +++ b/proto/node/akash/audit/v1beta4/query.proto @@ -0,0 +1,82 @@ +syntax = "proto3"; + +// buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + +package akash.audit.v1beta4; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "akash/audit/v1beta4/audit.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/audit/v1beta4"; + +// Query defines the gRPC querier service +service Query { + // AllProvidersAttributes queries all providers + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc AllProvidersAttributes(QueryAllProvidersAttributesRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/audit/v1beta4/audit/attributes/list"; + } + + // ProviderAttributes queries all provider signed attributes + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc ProviderAttributes(QueryProviderAttributesRequest) returns (QueryProvidersResponse) { + option 
(google.api.http).get = "/akash/audit/v1beta4/audit/attributes/{owner}/list"; + } + + // ProviderAuditorAttributes queries provider signed attributes by specific auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc ProviderAuditorAttributes(QueryProviderAuditorRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/audit/v1beta4/audit/attributes/{auditor}/{owner}"; + } + + // AuditorAttributes queries all providers signed by this auditor + // buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + // buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + rpc AuditorAttributes(QueryAuditorAttributesRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = "/akash/provider/v1beta4/auditor/{auditor}/list"; + } +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +message QueryProvidersResponse { + repeated Provider providers = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Providers" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +message QueryProviderRequest { + string auditor = 1; + string owner = 2; +} + +// QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method +message QueryAllProvidersAttributesRequest { + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryProviderAttributesRequest is request type for the Query/Provider RPC method +message QueryProviderAttributesRequest { + string owner = 1; + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryProviderAuditorRequest is request type for the Query/Providers RPC method +message QueryProviderAuditorRequest { + string auditor = 1; + string owner = 2; +} + +// QueryAuditorAttributesRequest is request type for the Query/Providers RPC method +message QueryAuditorAttributesRequest { + string auditor = 1; + 
cosmos.base.query.v1beta1.PageRequest pagination = 2; +} diff --git a/proto/node/akash/base/attributes/v1/attribute.proto b/proto/node/akash/base/attributes/v1/attribute.proto new file mode 100644 index 00000000..e3270d91 --- /dev/null +++ b/proto/node/akash/base/attributes/v1/attribute.proto @@ -0,0 +1,54 @@ +syntax = "proto3"; + +package akash.base.attributes.v1; + +import "gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = false; +option go_package = "github.com/akash-network/akash-api/go/node/types/attributes/v1"; + +// Attribute represents key value pair +message Attribute { + option (gogoproto.goproto_getters) = false; + string key = 1 [(gogoproto.moretags) = "yaml:\"key\""]; + string value = 2 [(gogoproto.moretags) = "yaml:\"value\""]; +} + +// SignedBy represents validation accounts that tenant expects signatures for provider attributes +// AllOf has precedence i.e. if there is at least one entry AnyOf is ignored regardless to how many +// entries there +// this behaviour to be discussed +message SignedBy { + option (gogoproto.goproto_getters) = false; + // all_of all keys in this list must have signed attributes + repeated string all_of = 1 [ + (gogoproto.jsontag) = "all_of", + (gogoproto.moretags) = "yaml:\"allOf\"" + ]; + // any_of at least of of the keys from the list must have signed attributes + repeated string any_of = 2 [ + (gogoproto.jsontag) = "any_of", + (gogoproto.moretags) = "yaml:\"anyOf\"" + ]; +} + +// PlacementRequirements +message PlacementRequirements { + option (gogoproto.goproto_getters) = false; + + // SignedBy list of keys that tenants expect to have signatures from + SignedBy signed_by = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "signed_by", + (gogoproto.moretags) = "yaml:\"signed_by\"" + ]; + + // Attribute list of attributes tenant expects from the provider + repeated Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = 
"Attributes", + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1/cpu.proto b/proto/node/akash/base/resources/v1/cpu.proto new file mode 100644 index 00000000..6d1821a2 --- /dev/null +++ b/proto/node/akash/base/resources/v1/cpu.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package akash.base.resources.v1; + +import "gogoproto/gogo.proto"; + +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1/resourcevalue.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/types/resources/v1"; + +// CPU stores resource units and cpu config attributes +message CPU { + option (gogoproto.equal) = true; + ResourceValue units = 1 [ + (gogoproto.nullable) = false + ]; + repeated akash.base.attributes.v1.Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1/endpoint.proto b/proto/node/akash/base/resources/v1/endpoint.proto new file mode 100644 index 00000000..00572136 --- /dev/null +++ b/proto/node/akash/base/resources/v1/endpoint.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package akash.base.resources.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/types/resources/v1"; + +// Endpoint describes a publicly accessible IP service +message Endpoint { + option (gogoproto.equal) = true; + + // This describes how the endpoint is implemented when the lease is deployed + enum Kind { + // Describes an endpoint that becomes a Kubernetes Ingress + SHARED_HTTP = 0; + // Describes an endpoint that becomes a Kubernetes NodePort + RANDOM_PORT = 1; + // Describes an endpoint that becomes a leased IP + LEASED_IP = 2; + } + + Kind kind = 1; + 
uint32 sequence_number = 2 [ + (gogoproto.customname) = "SequenceNumber", + (gogoproto.jsontag) = "sequence_number", + (gogoproto.moretags) = "yaml:\"sequence_number\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1/gpu.proto b/proto/node/akash/base/resources/v1/gpu.proto new file mode 100644 index 00000000..b36f1c99 --- /dev/null +++ b/proto/node/akash/base/resources/v1/gpu.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; +package akash.base.resources.v1; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1/resourcevalue.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/types/resources/v1"; + +// GPU stores resource units and cpu config attributes +message GPU { + option (gogoproto.equal) = true; + ResourceValue units = 1 [ + (gogoproto.nullable) = false + ]; + repeated akash.base.attributes.v1.Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1/memory.proto b/proto/node/akash/base/resources/v1/memory.proto new file mode 100644 index 00000000..91e11aa5 --- /dev/null +++ b/proto/node/akash/base/resources/v1/memory.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package akash.base.resources.v1; + +import "gogoproto/gogo.proto"; + +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1/resourcevalue.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/types/resources/v1"; + +// Memory stores resource quantity and memory attributes +message Memory { + option (gogoproto.equal) = true; + ResourceValue quantity = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "size", + (gogoproto.moretags) = "yaml:\"size\"" + ]; + repeated 
akash.base.attributes.v1.Attribute attributes = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1/resources.proto b/proto/node/akash/base/resources/v1/resources.proto new file mode 100644 index 00000000..b6e89b4b --- /dev/null +++ b/proto/node/akash/base/resources/v1/resources.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package akash.base.resources.v1; + +import "gogoproto/gogo.proto"; + +import "akash/base/resources/v1/cpu.proto"; +import "akash/base/resources/v1/gpu.proto"; +import "akash/base/resources/v1/memory.proto"; +import "akash/base/resources/v1/storage.proto"; +import "akash/base/resources/v1/endpoint.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/types/resources/v1"; + +// Resources describes all available resources types for deployment/node etc +// if field is nil resource is not present in the given data-structure +message Resources { + option (gogoproto.equal) = true; + uint32 id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + CPU cpu = 2 [ + (gogoproto.nullable) = true, + (gogoproto.customname) = "CPU", + (gogoproto.jsontag) = "cpu,omitempty", + (gogoproto.moretags) = "yaml:\"cpu,omitempty\"" + ]; + Memory memory = 3 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "memory,omitempty", + (gogoproto.moretags) = "yaml:\"memory,omitempty\"" + ]; + repeated Storage storage = 4 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Volumes", + (gogoproto.jsontag) = "storage,omitempty", + (gogoproto.moretags) = "yaml:\"storage,omitempty\"" + ]; + GPU gpu = 5 [ + (gogoproto.nullable) = true, + (gogoproto.customname) = "GPU", + (gogoproto.jsontag) = "gpu,omitempty", + (gogoproto.moretags) = 
"yaml:\"gpu,omitempty\"" + ]; + repeated Endpoint endpoints = 6 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Endpoints", + (gogoproto.jsontag) = "endpoints,omitempty", + (gogoproto.moretags) = "yaml:\"endpoints,omitempty\"" + ]; +} diff --git a/proto/node/akash/base/resources/v1/resourcevalue.proto b/proto/node/akash/base/resources/v1/resourcevalue.proto new file mode 100644 index 00000000..e88b6dab --- /dev/null +++ b/proto/node/akash/base/resources/v1/resourcevalue.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; +package akash.base.resources.v1; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/types/resources/v1"; + +// Unit stores cpu, memory and storage metrics +message ResourceValue { + option (gogoproto.equal) = true; + bytes val = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customtype) = "github.com/cosmos/cosmos-sdk/types.Int" + ]; +} diff --git a/proto/node/akash/base/resources/v1/storage.proto b/proto/node/akash/base/resources/v1/storage.proto new file mode 100644 index 00000000..f0014935 --- /dev/null +++ b/proto/node/akash/base/resources/v1/storage.proto @@ -0,0 +1,28 @@ +syntax = "proto3"; +package akash.base.resources.v1; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; +import "akash/base/resources/v1/resourcevalue.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/types/resources/v1"; + +// Storage stores resource quantity and storage attributes +message Storage { + option (gogoproto.equal) = true; + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + ResourceValue quantity = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "size", + (gogoproto.moretags) = "yaml:\"size\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = 
"github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.jsontag) = "attributes,omitempty", + (gogoproto.moretags) = "yaml:\"attributes,omitempty\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/authz.proto b/proto/node/akash/deployment/v1beta4/authz.proto new file mode 100644 index 00000000..755428bf --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/authz.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "cosmos_proto/cosmos.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from +// the granter's account for a deployment. +message DepositDeploymentAuthorization { + option (cosmos_proto.implements_interface) = "Authorization"; + + // SpendLimit is the amount the grantee is authorized to spend from the granter's account for + // the purpose of deployment. 
+ cosmos.base.v1beta1.Coin spend_limit = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "spend_limit" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/deployment.proto b/proto/node/akash/deployment/v1beta4/deployment.proto new file mode 100644 index 00000000..33478668 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/deployment.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// DeploymentID stores owner and sequence number +message DeploymentID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; +} + +// Deployment stores deploymentID, state and version details +message Deployment { + option (gogoproto.equal) = false; + + DeploymentID deployment_id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "DeploymentID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + // State is an enum which refers to state of deployment + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. 
So declaring dummy state + invalid = 0 [(gogoproto.enumvalue_customname) = "DeploymentStateInvalid"]; + // DeploymentActive denotes state for deployment active + active = 1 [(gogoproto.enumvalue_customname) = "DeploymentActive"]; + // DeploymentClosed denotes state for deployment closed + closed = 2 [(gogoproto.enumvalue_customname) = "DeploymentClosed"]; + } + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + bytes version = 3 [ + (gogoproto.jsontag) = "version", + (gogoproto.moretags) = "yaml:\"version\"" + ]; + int64 created_at = 4; +} + +// DeploymentFilters defines filters used to filter deployments +message DeploymentFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + string state = 3 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/deploymentmsg.proto b/proto/node/akash/deployment/v1beta4/deploymentmsg.proto new file mode 100644 index 00000000..c2b52d61 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/deploymentmsg.proto @@ -0,0 +1,106 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; + +import "akash/deployment/v1beta4/deployment.proto"; +import "akash/deployment/v1beta4/groupspec.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// MsgCreateDeployment defines an SDK message for creating deployment +message MsgCreateDeployment { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + repeated GroupSpec groups = 
2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "groups", + (gogoproto.moretags) = "yaml:\"groups\"" + ]; + bytes version = 3 [ + (gogoproto.jsontag) = "version", + (gogoproto.moretags) = "yaml:\"version\"" + ]; + cosmos.base.v1beta1.Coin deposit = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deposit", + (gogoproto.moretags) = "yaml:\"deposit\"" + ]; + // Depositor pays for the deposit + string depositor = 5 [ + (gogoproto.jsontag) = "depositor", + (gogoproto.moretags) = "yaml:\"depositor\"" + ]; +} + +// MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. +message MsgCreateDeploymentResponse {} + +// MsgDepositDeployment deposits more funds into the deposit account +message MsgDepositDeployment { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + cosmos.base.v1beta1.Coin amount = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "amount", + (gogoproto.moretags) = "yaml:\"amount\"" + ]; + + // Depositor pays for the deposit + string depositor = 3 [ + (gogoproto.jsontag) = "depositor", + (gogoproto.moretags) = "yaml:\"depositor\"" + ]; +} + +// MsgDepositDeploymentResponse defines the Msg/DepositDeployment response type. +message MsgDepositDeploymentResponse {} + +// MsgUpdateDeployment defines an SDK message for updating deployment +message MsgUpdateDeployment { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + bytes version = 3 [ + (gogoproto.jsontag) = "version", + (gogoproto.moretags) = "yaml:\"version\"" + ]; +} + +// MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type.
+message MsgUpdateDeploymentResponse {} + +// MsgCloseDeployment defines an SDK message for closing deployment +message MsgCloseDeployment { + option (gogoproto.equal) = false; + + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID", + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. +message MsgCloseDeploymentResponse {} diff --git a/proto/node/akash/deployment/v1beta4/genesis.proto b/proto/node/akash/deployment/v1beta4/genesis.proto new file mode 100644 index 00000000..437f508a --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/genesis.proto @@ -0,0 +1,39 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/deployment/v1beta4/deployment.proto"; +import "akash/deployment/v1beta4/group.proto"; +import "akash/deployment/v1beta4/params.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// GenesisDeployment defines the basic genesis state used by deployment module +message GenesisDeployment { + Deployment deployment = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deployment", + (gogoproto.moretags) = "yaml:\"deployment\"" + ]; + + repeated Group groups = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "groups", + (gogoproto.moretags) = "yaml:\"groups\"" + ]; +} + +// GenesisState stores slice of genesis deployment instance +message GenesisState { + repeated GenesisDeployment deployments = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deployments", + (gogoproto.moretags) = "yaml:\"deployments\"" + ]; + + Params params = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "params", + (gogoproto.moretags) = "yaml:\"params\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/group.proto b/proto/node/akash/deployment/v1beta4/group.proto new file mode 100644 index 
00000000..127fb039 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/group.proto @@ -0,0 +1,58 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/deployment/v1beta4/groupid.proto"; +import "akash/deployment/v1beta4/groupspec.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// Group stores group id, state and specifications of group +message Group { + option (gogoproto.equal) = false; + + GroupID group_id = 1 [ + (gogoproto.customname) = "GroupID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; + + // State is an enum which refers to state of group + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "GroupStateInvalid" + ]; + // GroupOpen denotes state for group open + open = 1 [ + (gogoproto.enumvalue_customname) = "GroupOpen" + ]; + // GroupPaused denotes state for group paused + paused = 2 [ + (gogoproto.enumvalue_customname) = "GroupPaused" + ]; + // GroupInsufficientFunds denotes state for group insufficient_funds + insufficient_funds = 3 [ + (gogoproto.enumvalue_customname) = "GroupInsufficientFunds" + ]; + // GroupClosed denotes state for group closed + closed = 4 [ + (gogoproto.enumvalue_customname) = "GroupClosed" + ]; + } + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = "yaml:\"state\"" + ]; + GroupSpec group_spec = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "spec", + (gogoproto.moretags) = "yaml:\"spec\"" + ]; + + int64 created_at = 4; +} diff --git a/proto/node/akash/deployment/v1beta4/groupid.proto b/proto/node/akash/deployment/v1beta4/groupid.proto new file mode 100644 index 00000000..278ce046 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/groupid.proto @@ -0,0 +1,27 @@ +syntax = "proto3";
+package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// GroupID stores owner, deployment sequence number and group sequence number +message GroupID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = "yaml:\"owner\"" + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = "yaml:\"dseq\"" + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = "yaml:\"gseq\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/groupmsg.proto b/proto/node/akash/deployment/v1beta4/groupmsg.proto new file mode 100644 index 00000000..624bf937 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/groupmsg.proto @@ -0,0 +1,52 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/deployment/v1beta4/groupid.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// MsgCloseGroup defines SDK message to close a single Group within a Deployment. +message MsgCloseGroup { + option (gogoproto.equal) = false; + + GroupID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgCloseGroupResponse defines the Msg/CloseGroup response type. +message MsgCloseGroupResponse {} + +// MsgPauseGroup defines SDK message to pause a single Group within a Deployment. +message MsgPauseGroup { + option (gogoproto.equal) = false; + + GroupID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgPauseGroupResponse defines the Msg/PauseGroup response type.
+message MsgPauseGroupResponse {} + +// MsgStartGroup defines SDK message to start a single Group within a Deployment. +message MsgStartGroup { + option (gogoproto.equal) = false; + + GroupID id = 1 [ + (gogoproto.customname) = "ID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = "yaml:\"id\"" + ]; +} + +// MsgStartGroupResponse defines the Msg/StartGroup response type. +message MsgStartGroupResponse {} diff --git a/proto/node/akash/deployment/v1beta4/groupspec.proto b/proto/node/akash/deployment/v1beta4/groupspec.proto new file mode 100644 index 00000000..a28ca2f9 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/groupspec.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; +import "akash/deployment/v1beta4/resourceunit.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// GroupSpec stores group specifications +message GroupSpec { + option (gogoproto.equal) = false; + option (gogoproto.goproto_getters) = false; + + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + + akash.base.attributes.v1.PlacementRequirements requirements = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "requirements", + (gogoproto.moretags) = "yaml:\"requirements\"" + ]; + + repeated ResourceUnit resources = 3 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ResourceUnits", + (gogoproto.jsontag) = "resources", + (gogoproto.moretags) = "yaml:\"resources\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/params.proto b/proto/node/akash/deployment/v1beta4/params.proto new file mode 100644 index 00000000..34d74bd6 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/params.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import
"cosmos/base/v1beta1/coin.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// Params defines the parameters for the x/deployment package +message Params { + repeated cosmos.base.v1beta1.Coin min_deposits = 1[ + (gogoproto.castrepeated) = "github.com/cosmos/cosmos-sdk/types.Coins", + (gogoproto.customname) = "MinDeposits", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "min_deposits", + (gogoproto.moretags) = "yaml:\"min_deposits\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/query.proto b/proto/node/akash/deployment/v1beta4/query.proto new file mode 100644 index 00000000..71cd6797 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/query.proto @@ -0,0 +1,90 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "akash/deployment/v1beta4/deployment.proto"; +import "akash/deployment/v1beta4/group.proto"; +import "akash/deployment/v1beta4/groupid.proto"; +import "akash/escrow/v1beta3/types.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// Query defines the gRPC querier service +service Query { + // Deployments queries deployments + rpc Deployments(QueryDeploymentsRequest) returns (QueryDeploymentsResponse) { + option (google.api.http).get = "/akash/deployment/v1beta4/deployments/list"; + } + + // Deployment queries deployment details + rpc Deployment(QueryDeploymentRequest) returns (QueryDeploymentResponse) { + option (google.api.http).get = "/akash/deployment/v1beta4/deployments/info"; + } + + // Group queries group details + rpc Group(QueryGroupRequest) returns (QueryGroupResponse) { + option (google.api.http).get = "/akash/deployment/v1beta4/groups/info"; + } +} + +// QueryDeploymentsRequest is request type for the Query/Deployments RPC method +message QueryDeploymentsRequest { + DeploymentFilters 
filters = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryDeploymentsResponse is response type for the Query/Deployments RPC method +message QueryDeploymentsResponse { + repeated QueryDeploymentResponse deployments = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "DeploymentResponses" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryDeploymentRequest is request type for the Query/Deployment RPC method +message QueryDeploymentRequest { + DeploymentID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryDeploymentResponse is response type for the Query/Deployment RPC method +message QueryDeploymentResponse { + option (gogoproto.equal) = false; + Deployment deployment = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deployment", + (gogoproto.moretags) = "yaml:\"deployment\"" + ]; + repeated Group groups = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "groups", + (gogoproto.moretags) = "yaml:\"groups\"" + ]; + akash.escrow.v1beta3.Account escrow_account = 3 [ + (gogoproto.nullable) = false + ]; +} + +// QueryGroupRequest is request type for the Query/Group RPC method +message QueryGroupRequest { + GroupID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryGroupResponse is response type for the Query/Group RPC method +message QueryGroupResponse { + Group group = 1 [ + (gogoproto.nullable) = false + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/resourceunit.proto b/proto/node/akash/deployment/v1beta4/resourceunit.proto new file mode 100644 index 00000000..b670f846 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/resourceunit.proto @@ -0,0 +1,29 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/base/resources/v1/resources.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +option 
go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// ResourceUnit extends Resources and adds Count along with the Price +message ResourceUnit { + option (gogoproto.equal) = true; + + akash.base.resources.v1.Resources resource = 1 [ + (gogoproto.nullable) = false, + (gogoproto.embed) = true, + (gogoproto.jsontag) = "resource", + (gogoproto.moretags) = "yaml:\"resource\"" + ]; + uint32 count = 2 [ + (gogoproto.jsontag) = "count", + (gogoproto.moretags) = "yaml:\"count\"" + ]; + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = "yaml:\"price\"" + ]; +} diff --git a/proto/node/akash/deployment/v1beta4/service.proto b/proto/node/akash/deployment/v1beta4/service.proto new file mode 100644 index 00000000..dfcbc692 --- /dev/null +++ b/proto/node/akash/deployment/v1beta4/service.proto @@ -0,0 +1,31 @@ +syntax = "proto3"; +package akash.deployment.v1beta4; + +import "akash/deployment/v1beta4/deploymentmsg.proto"; +import "akash/deployment/v1beta4/groupmsg.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/deployment/v1beta4"; + +// Msg defines the deployment Msg service. +service Msg { + // CreateDeployment defines a method to create new deployment given proper inputs. + rpc CreateDeployment(MsgCreateDeployment) returns (MsgCreateDeploymentResponse); + + // DepositDeployment deposits more funds into the deployment account + rpc DepositDeployment(MsgDepositDeployment) returns (MsgDepositDeploymentResponse); + + // UpdateDeployment defines a method to update a deployment given proper inputs. + rpc UpdateDeployment(MsgUpdateDeployment) returns (MsgUpdateDeploymentResponse); + + // CloseDeployment defines a method to close a deployment given proper inputs. + rpc CloseDeployment(MsgCloseDeployment) returns (MsgCloseDeploymentResponse); + + // CloseGroup defines a method to close a group of a deployment given proper inputs. 
+ rpc CloseGroup(MsgCloseGroup) returns (MsgCloseGroupResponse); + + // PauseGroup defines a method to pause a group of a deployment given proper inputs. + rpc PauseGroup(MsgPauseGroup) returns (MsgPauseGroupResponse); + + // StartGroup defines a method to start a group of a deployment given proper inputs. + rpc StartGroup(MsgStartGroup) returns (MsgStartGroupResponse); +} diff --git a/proto/node/akash/market/v1beta5/bid.proto b/proto/node/akash/market/v1beta5/bid.proto new file mode 100644 index 00000000..89d5c10b --- /dev/null +++ b/proto/node/akash/market/v1beta5/bid.proto @@ -0,0 +1,199 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/base/resources/v1/resources.proto"; +import "akash/market/v1beta5/order.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta5"; + +// ResourceOffer describes resources that provider is offering +// for deployment +message ResourceOffer { + option (gogoproto.equal) = true; + akash.base.resources.v1.Resources resources = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "resources", + (gogoproto.moretags) = 'yaml:"resources"' + ]; + uint32 count = 2 [ + (gogoproto.jsontag) = "count", + (gogoproto.moretags) = 'yaml:"count"' + ]; +} + +// MsgCreateBid defines an SDK message for creating Bid +message MsgCreateBid { + option (gogoproto.equal) = false; + + OrderID order = 1 [ + (gogoproto.customname) = "Order", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "order", + (gogoproto.moretags) = 'yaml:"order"' + ]; + string provider = 2 [ + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = 'yaml:"provider"' + ]; + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = 'yaml:"price"' + ]; + cosmos.base.v1beta1.Coin deposit = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "deposit", +
(gogoproto.moretags) = 'yaml:"deposit"' + ]; + repeated ResourceOffer resources_offer = 5 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ResourcesOffer", + (gogoproto.customname) = "ResourcesOffer", + (gogoproto.jsontag) = "resources_offer", + (gogoproto.moretags) = 'yaml:"resources_offer"' + ]; +} + +// MsgCreateBidResponse defines the Msg/CreateBid response type. +message MsgCreateBidResponse {} + +// MsgCloseBid defines an SDK message for closing bid +message MsgCloseBid { + option (gogoproto.equal) = false; + + BidID bid_id = 1 [ + (gogoproto.customname) = "BidID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = 'yaml:"id"' + ]; +} + +// MsgCloseBidResponse defines the Msg/CloseBid response type. +message MsgCloseBidResponse {} + +// BidID stores owner and all other seq numbers +// A successful bid becomes a Lease(ID). +message BidID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = 'yaml:"owner"' + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = 'yaml:"dseq"' + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = 'yaml:"gseq"' + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = 'yaml:"oseq"' + ]; + string provider = 5 [ + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = 'yaml:"provider"' + ]; +} + +// Bid stores BidID, state of bid and price +message Bid { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + BidID bid_id = 1 [ + (gogoproto.customname) = "BidID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = 'yaml:"id"' + ]; + + // State is an enum which refers to state of bid + enum State { + option 
(gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "BidStateInvalid" + ]; + // BidOpen denotes state for bid open + open = 1 [ + (gogoproto.enumvalue_customname) = "BidOpen" + ]; + // BidActive denotes state for bid active + active = 2 [ + (gogoproto.enumvalue_customname) = "BidActive" + ]; + // BidLost denotes state for bid lost + lost = 3 [ + (gogoproto.enumvalue_customname) = "BidLost" + ]; + // BidClosed denotes state for bid closed + closed = 4 [ + (gogoproto.enumvalue_customname) = "BidClosed" + ]; + } + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = 'yaml:"state"' + ]; + + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = 'yaml:"price"' + ]; + int64 created_at = 4; + + repeated ResourceOffer resources_offer = 5 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ResourcesOffer", + (gogoproto.customname) = "ResourcesOffer", + (gogoproto.jsontag) = "resources_offer", + (gogoproto.moretags) = 'yaml:"resources_offer"' + ]; +} + +// BidFilters defines flags for bid list filter +message BidFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = 'yaml:"owner"' + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = 'yaml:"dseq"' + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = 'yaml:"gseq"' + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = 'yaml:"oseq"' + ]; + string provider = 5 [ + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = 'yaml:"provider"' + ]; + string state = 6 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = 'yaml:"state"' + ]; +} diff --git
a/proto/node/akash/market/v1beta5/genesis.proto b/proto/node/akash/market/v1beta5/genesis.proto new file mode 100644 index 00000000..7f7acfd7 --- /dev/null +++ b/proto/node/akash/market/v1beta5/genesis.proto @@ -0,0 +1,35 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; + +import "akash/market/v1beta5/order.proto"; +import "akash/market/v1beta5/lease.proto"; +import "akash/market/v1beta5/bid.proto"; +import "akash/market/v1beta5/params.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta5"; + +// GenesisState defines the basic genesis state used by market module +message GenesisState { + Params params = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "params", + (gogoproto.moretags) = 'yaml:"params"' + ]; + repeated Order orders = 2 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "orders", + (gogoproto.moretags) = 'yaml:"orders"' + ]; + repeated Lease leases = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "leases", + (gogoproto.moretags) = 'yaml:"leases"' + ]; + repeated Bid bids = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "bids", + (gogoproto.moretags) = 'yaml:"bids"' + ]; +} diff --git a/proto/node/akash/market/v1beta5/lease.proto b/proto/node/akash/market/v1beta5/lease.proto new file mode 100644 index 00000000..ede77709 --- /dev/null +++ b/proto/node/akash/market/v1beta5/lease.proto @@ -0,0 +1,166 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; + +import "cosmos/base/v1beta1/coin.proto"; + +import "akash/market/v1beta5/bid.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta5"; + +// LeaseID stores bid details of lease +message LeaseID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = 'yaml:"owner"' + ]; + uint64 dseq = 2 [ + 
(gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = 'yaml:"dseq"' + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = 'yaml:"gseq"' + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = 'yaml:"oseq"' + ]; + string provider = 5 [ + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = 'yaml:"provider"' + ]; +} + +// Lease stores LeaseID, state of lease and price +message Lease { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + LeaseID lease_id = 1 [ + (gogoproto.customname) = "LeaseID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = 'yaml:"id"' + ]; + + // State is an enum which refers to state of lease + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "LeaseStateInvalid" + ]; + // LeaseActive denotes state for lease active + active = 1 [ + (gogoproto.enumvalue_customname) = "LeaseActive" + ]; + // LeaseInsufficientFunds denotes state for lease insufficient_funds + insufficient_funds = 2 [ + (gogoproto.enumvalue_customname) = "LeaseInsufficientFunds" + ]; + // LeaseClosed denotes state for lease closed + closed = 3 [ + (gogoproto.enumvalue_customname) = "LeaseClosed" + ]; + } + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = 'yaml:"state"' + ]; + cosmos.base.v1beta1.DecCoin price = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "price", + (gogoproto.moretags) = 'yaml:"price"' + ]; + int64 created_at = 4; + int64 closed_on = 5; +} + +// LeaseFilters defines flags for lease list filter +message LeaseFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = 'yaml:"owner"' + ]; + 
uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = 'yaml:"dseq"' + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = 'yaml:"gseq"' + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = 'yaml:"oseq"' + ]; + string provider = 5 [ + (gogoproto.jsontag) = "provider", + (gogoproto.moretags) = 'yaml:"provider"' + ]; + string state = 6 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = 'yaml:"state"' + ]; +} + +// MsgCreateLease is sent to create a lease +message MsgCreateLease { + option (gogoproto.equal) = false; + + BidID bid_id = 1 [ + (gogoproto.customname) = "BidID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = 'yaml:"id"' + ]; +} + +// MsgCreateLeaseResponse is the response from creating a lease +message MsgCreateLeaseResponse {} + +// MsgWithdrawLease defines an SDK message for withdrawing accrued funds from a lease +message MsgWithdrawLease { + option (gogoproto.equal) = false; + + LeaseID bid_id = 1 [ + (gogoproto.customname) = "LeaseID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = 'yaml:"id"' + ]; +} + +// MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. +message MsgWithdrawLeaseResponse {} + + +// MsgCloseLease defines an SDK message for closing order +message MsgCloseLease { + option (gogoproto.equal) = false; + + LeaseID lease_id = 1 [ + (gogoproto.customname) = "LeaseID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = 'yaml:"id"' + ]; +} + +// MsgCloseLeaseResponse defines the Msg/CloseLease response type. 
+message MsgCloseLeaseResponse {} diff --git a/proto/node/akash/market/v1beta5/order.proto b/proto/node/akash/market/v1beta5/order.proto new file mode 100644 index 00000000..9a46c94c --- /dev/null +++ b/proto/node/akash/market/v1beta5/order.proto @@ -0,0 +1,109 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "akash/deployment/v1beta4/groupspec.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta5"; + +// OrderID stores owner and all other seq numbers +message OrderID { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = 'yaml:"owner"' + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = 'yaml:"dseq"' + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = 'yaml:"gseq"' + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = 'yaml:"oseq"' + ]; +} + +// Order stores orderID, state of order and other details +message Order { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + OrderID order_id = 1 [ + (gogoproto.customname) = "OrderID", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "id", + (gogoproto.moretags) = 'yaml:"id"' + ]; + + // State is an enum which refers to state of order + enum State { + option (gogoproto.goproto_enum_prefix) = false; + + // Prefix should start with 0 in enum. 
So declaring dummy state + invalid = 0 [ + (gogoproto.enumvalue_customname) = "OrderStateInvalid" + ]; + // OrderOpen denotes state for order open + open = 1 [ + (gogoproto.enumvalue_customname) = "OrderOpen" + ]; + // OrderActive denotes state for order active + active = 2 [ + (gogoproto.enumvalue_customname) = "OrderActive" + ]; + // OrderClosed denotes state for order closed + closed = 3 [ + (gogoproto.enumvalue_customname) = "OrderClosed" + ]; + } + + State state = 2 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = 'yaml:"state"' + ]; + akash.deployment.v1beta4.GroupSpec spec = 3 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "spec", + (gogoproto.moretags) = 'yaml:"spec"' + ]; + + int64 created_at = 4; +} + +// OrderFilters defines flags for order list filter +message OrderFilters { + option (gogoproto.equal) = false; + + string owner = 1 [ + (gogoproto.jsontag) = "owner", + (gogoproto.moretags) = 'yaml:"owner"' + ]; + uint64 dseq = 2 [ + (gogoproto.customname) = "DSeq", + (gogoproto.jsontag) = "dseq", + (gogoproto.moretags) = 'yaml:"dseq"' + ]; + uint32 gseq = 3 [ + (gogoproto.customname) = "GSeq", + (gogoproto.jsontag) = "gseq", + (gogoproto.moretags) = 'yaml:"gseq"' + ]; + uint32 oseq = 4 [ + (gogoproto.customname) = "OSeq", + (gogoproto.jsontag) = "oseq", + (gogoproto.moretags) = 'yaml:"oseq"' + ]; + string state = 5 [ + (gogoproto.jsontag) = "state", + (gogoproto.moretags) = 'yaml:"state"' + ]; +} diff --git a/proto/node/akash/market/v1beta5/params.proto b/proto/node/akash/market/v1beta5/params.proto new file mode 100644 index 00000000..08a6673e --- /dev/null +++ b/proto/node/akash/market/v1beta5/params.proto @@ -0,0 +1,22 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "cosmos/base/v1beta1/coin.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta5"; + +// Params is the params for the x/market module +message Params { + cosmos.base.v1beta1.Coin 
bid_min_deposit = 1 [ + (gogoproto.customname) = "BidMinDeposit", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "bid_min_deposit", + (gogoproto.moretags) = 'yaml:"bid_min_deposit"' + ]; + uint32 order_max_bids = 2 [ + (gogoproto.customname) = "OrderMaxBids", + (gogoproto.jsontag) = "order_max_bids", + (gogoproto.moretags) = 'yaml:"order_max_bids"' + ]; +} diff --git a/proto/node/akash/market/v1beta5/query.proto b/proto/node/akash/market/v1beta5/query.proto new file mode 100644 index 00000000..f4745dcd --- /dev/null +++ b/proto/node/akash/market/v1beta5/query.proto @@ -0,0 +1,151 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; + +import "cosmos/base/query/v1beta1/pagination.proto"; + +import "akash/escrow/v1beta3/types.proto"; +import "akash/market/v1beta5/order.proto"; +import "akash/market/v1beta5/bid.proto"; +import "akash/market/v1beta5/lease.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta5"; + +// Query defines the gRPC querier service +service Query { + // Orders queries orders with filters + rpc Orders(QueryOrdersRequest) returns (QueryOrdersResponse) { + option (google.api.http).get = "/akash/market/v1beta5/orders/list"; + } + + // Order queries order details + rpc Order(QueryOrderRequest) returns (QueryOrderResponse) { + option (google.api.http).get = "/akash/market/v1beta5/orders/info"; + } + + // Bids queries bids with filters + rpc Bids(QueryBidsRequest) returns (QueryBidsResponse) { + option (google.api.http).get = "/akash/market/v1beta5/bids/list"; + } + + // Bid queries bid details + rpc Bid(QueryBidRequest) returns (QueryBidResponse) { + option (google.api.http).get = "/akash/market/v1beta5/bids/info"; + } + + // Leases queries leases with filters + rpc Leases(QueryLeasesRequest) returns (QueryLeasesResponse) { + option (google.api.http).get = "/akash/market/v1beta5/leases/list"; + } + + // Lease queries lease details + 
rpc Lease(QueryLeaseRequest) returns (QueryLeaseResponse) { + option (google.api.http).get = "/akash/market/v1beta5/leases/info"; + } +} + +// QueryOrdersRequest is request type for the Query/Orders RPC method +message QueryOrdersRequest { + OrderFilters filters = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryOrdersResponse is response type for the Query/Orders RPC method +message QueryOrdersResponse { + repeated Order orders = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Orders" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryOrderRequest is request type for the Query/Order RPC method +message QueryOrderRequest { + OrderID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryOrderResponse is response type for the Query/Order RPC method +message QueryOrderResponse { + Order order = 1 [ + (gogoproto.nullable) = false + ]; +} + +// QueryBidsRequest is request type for the Query/Bids RPC method +message QueryBidsRequest { + BidFilters filters = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryBidsResponse is response type for the Query/Bids RPC method +message QueryBidsResponse { + repeated QueryBidResponse bids = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryBidRequest is request type for the Query/Bid RPC method +message QueryBidRequest { + BidID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryBidResponse is response type for the Query/Bid RPC method +message QueryBidResponse { + Bid bid = 1 [(gogoproto.nullable) = false]; + akash.escrow.v1beta3.Account escrow_account = 2 [ + (gogoproto.nullable) = false + ]; +} + +// QueryLeasesRequest is request type for the Query/Leases RPC method +message QueryLeasesRequest { + LeaseFilters filters = 1 [ 
+ (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageRequest pagination = 2; +} + +// QueryLeasesResponse is response type for the Query/Leases RPC method +message QueryLeasesResponse { + repeated QueryLeaseResponse leases = 1 [ + (gogoproto.nullable) = false + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryLeaseRequest is request type for the Query/Lease RPC method +message QueryLeaseRequest { + LeaseID id = 1 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "ID" + ]; +} + +// QueryLeaseResponse is response type for the Query/Lease RPC method +message QueryLeaseResponse { + Lease lease = 1 [ + (gogoproto.nullable) = false + ]; + akash.escrow.v1beta3.FractionalPayment escrow_payment = 2 [ + (gogoproto.nullable) = false + ]; +} diff --git a/proto/node/akash/market/v1beta5/service.proto b/proto/node/akash/market/v1beta5/service.proto new file mode 100644 index 00000000..87b8d96d --- /dev/null +++ b/proto/node/akash/market/v1beta5/service.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; +package akash.market.v1beta5; + +import "akash/market/v1beta5/bid.proto"; +import "akash/market/v1beta5/lease.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/market/v1beta5"; + +// Msg defines the market Msg service +service Msg { + // CreateBid defines a method to create a bid given proper inputs. + rpc CreateBid(MsgCreateBid) returns (MsgCreateBidResponse); + + // CloseBid defines a method to close a bid given proper inputs. + rpc CloseBid(MsgCloseBid) returns (MsgCloseBidResponse); + + // WithdrawLease withdraws accrued funds from the lease payment + rpc WithdrawLease(MsgWithdrawLease) returns (MsgWithdrawLeaseResponse); + + // CreateLease creates a new lease + rpc CreateLease(MsgCreateLease) returns (MsgCreateLeaseResponse); + + // CloseLease defines a method to close an order given proper inputs. 
+ rpc CloseLease(MsgCloseLease) returns (MsgCloseLeaseResponse); +} diff --git a/proto/node/akash/provider/v1beta4/genesis.proto b/proto/node/akash/provider/v1beta4/genesis.proto new file mode 100644 index 00000000..e8001188 --- /dev/null +++ b/proto/node/akash/provider/v1beta4/genesis.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/provider/v1beta4/provider.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta4"; + +// GenesisState defines the basic genesis state used by provider module +message GenesisState { + repeated Provider providers = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "providers", + (gogoproto.moretags) = "yaml:\"providers\"" + ]; +} diff --git a/proto/node/akash/provider/v1beta4/provider.proto b/proto/node/akash/provider/v1beta4/provider.proto new file mode 100644 index 00000000..887d0b5d --- /dev/null +++ b/proto/node/akash/provider/v1beta4/provider.proto @@ -0,0 +1,120 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; +import "akash/base/attributes/v1/attribute.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta4"; + +// Msg defines the provider Msg service +service Msg { + // CreateProvider defines a method that creates a provider given the proper inputs + rpc CreateProvider(MsgCreateProvider) returns (MsgCreateProviderResponse); + + // UpdateProvider defines a method that updates a provider given the proper inputs + rpc UpdateProvider(MsgUpdateProvider) returns (MsgUpdateProviderResponse); + + // DeleteProvider defines a method that deletes a provider given the proper inputs + rpc DeleteProvider(MsgDeleteProvider) returns (MsgDeleteProviderResponse); +} + +// ProviderInfo +message ProviderInfo { + string email = 1 [ + (gogoproto.customname) = "EMail", + (gogoproto.jsontag) = "email", + (gogoproto.moretags) = "yaml:\"email\"" + ]; + 
string website = 2 [ + (gogoproto.jsontag) = "website", + (gogoproto.moretags) = "yaml:\"website\"" + ]; +} + +// MsgCreateProvider defines an SDK message for creating a provider +message MsgCreateProvider { + option (gogoproto.equal) = false; + + string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; + string host_uri = 2 [ + (gogoproto.customname) = "HostURI", + (gogoproto.jsontag) = "host_uri", + (gogoproto.moretags) = "yaml:\"host_uri\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; + + ProviderInfo info = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "info", + (gogoproto.moretags) = "yaml:\"info\"" + ]; +} + +// MsgCreateProviderResponse defines the Msg/CreateProvider response type. +message MsgCreateProviderResponse {} + +// MsgUpdateProvider defines an SDK message for updating a provider +message MsgUpdateProvider { + option (gogoproto.equal) = false; + + string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; + string host_uri = 2 [ + (gogoproto.customname) = "HostURI", + (gogoproto.jsontag) = "host_uri", + (gogoproto.moretags) = "yaml:\"host_uri\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; + ProviderInfo info = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "info", + (gogoproto.moretags) = "yaml:\"info\"" + ]; +} + +// MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. 
+message MsgUpdateProviderResponse {} + +// MsgDeleteProvider defines an SDK message for deleting a provider +message MsgDeleteProvider { + option (gogoproto.equal) = false; + + string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; +} + +// MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. +message MsgDeleteProviderResponse {} + +// Provider stores owner and host details +message Provider { + option (gogoproto.equal) = false; + option (gogoproto.goproto_stringer) = false; + + string owner = 1 [(gogoproto.jsontag) = "owner", (gogoproto.moretags) = "yaml:\"owner\""]; + string host_uri = 2 [ + (gogoproto.customname) = "HostURI", + (gogoproto.jsontag) = "host_uri", + (gogoproto.moretags) = "yaml:\"host_uri\"" + ]; + repeated akash.base.attributes.v1.Attribute attributes = 3 [ + (gogoproto.castrepeated) = "github.com/akash-network/akash-api/go/node/types/attributes/v1.Attributes", + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "attributes", + (gogoproto.moretags) = "yaml:\"attributes\"" + ]; + + ProviderInfo info = 4 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "info", + (gogoproto.moretags) = "yaml:\"info\"" + ]; +} diff --git a/proto/node/akash/provider/v1beta4/query.proto b/proto/node/akash/provider/v1beta4/query.proto new file mode 100644 index 00000000..aacfb053 --- /dev/null +++ b/proto/node/akash/provider/v1beta4/query.proto @@ -0,0 +1,49 @@ +syntax = "proto3"; +package akash.provider.v1beta4; + +import "gogoproto/gogo.proto"; +import "google/api/annotations.proto"; +import "cosmos/base/query/v1beta1/pagination.proto"; +import "akash/provider/v1beta4/provider.proto"; + +option go_package = "github.com/akash-network/akash-api/go/node/provider/v1beta4"; + +// Query defines the gRPC querier service +service Query { + // Providers queries providers + rpc Providers(QueryProvidersRequest) returns (QueryProvidersResponse) { + option (google.api.http).get = 
"/akash/provider/v1beta4/providers"; + } + + // Provider queries provider details + rpc Provider(QueryProviderRequest) returns (QueryProviderResponse) { + option (google.api.http).get = "/akash/provider/v1beta4/providers/{owner}"; + } +} + +// QueryProvidersRequest is request type for the Query/Providers RPC method +message QueryProvidersRequest { + cosmos.base.query.v1beta1.PageRequest pagination = 1; +} + +// QueryProvidersResponse is response type for the Query/Providers RPC method +message QueryProvidersResponse { + repeated Provider providers = 1 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Providers" + ]; + + cosmos.base.query.v1beta1.PageResponse pagination = 2; +} + +// QueryProviderRequest is request type for the Query/Provider RPC method +message QueryProviderRequest { + string owner = 1; +} + +// QueryProviderResponse is response type for the Query/Provider RPC method +message QueryProviderResponse { + Provider provider = 1 [ + (gogoproto.nullable) = false + ]; +} diff --git a/proto/provider/akash/manifest/v2beta3/group.proto b/proto/provider/akash/manifest/v2beta3/group.proto new file mode 100644 index 00000000..7b8dc4bd --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/group.proto @@ -0,0 +1,27 @@ +syntax = "proto3"; + +package akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; +import "akash/manifest/v2beta3/service.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta3"; + +// Group store name and list of services +message Group { + // getters must be implemented as value receiver + // due to GetName collision + option (gogoproto.goproto_getters) = false; + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + repeated Service services = 2 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "Services", + (gogoproto.jsontag) = "services", + 
(gogoproto.moretags) = "yaml:\"services\"" + ]; +} diff --git a/proto/provider/akash/manifest/v2beta3/httpoptions.proto b/proto/provider/akash/manifest/v2beta3/httpoptions.proto new file mode 100644 index 00000000..8f7eced2 --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/httpoptions.proto @@ -0,0 +1,38 @@ +syntax = "proto3"; + +package akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta3"; + +// ServiceExposeHTTPOptions +message ServiceExposeHTTPOptions { + uint32 max_body_size = 1 [ + (gogoproto.jsontag) = "maxBodySize", + (gogoproto.moretags) = "yaml:\"maxBodySize\"" + ]; + uint32 read_timeout = 2 [ + (gogoproto.jsontag) = "readTimeout", + (gogoproto.moretags) = "yaml:\"readTimeout\"" + ]; + uint32 send_timeout = 3 [ + (gogoproto.jsontag) = "sendTimeout", + (gogoproto.moretags) = "yaml:\"sendTimeout\"" + ]; + uint32 next_tries = 4 [ + (gogoproto.jsontag) = "nextTries", + (gogoproto.moretags) = "yaml:\"nextTries\"" + ]; + uint32 next_timeout = 5 [ + (gogoproto.jsontag) = "nextTimeout", + (gogoproto.moretags) = "yaml:\"nextTimeout\"" + ]; + repeated string next_cases = 6 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "nextCases,omitempty", + (gogoproto.moretags) = "yaml:\"nextCases,omitempty\"" + ]; +} diff --git a/proto/provider/akash/manifest/v2beta3/service.proto b/proto/provider/akash/manifest/v2beta3/service.proto new file mode 100644 index 00000000..55c02967 --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/service.proto @@ -0,0 +1,106 @@ +syntax = "proto3"; + +package akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; +import "akash/manifest/v2beta3/serviceexpose.proto"; +import "akash/base/resources/v1/resources.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = 
"github.com/akash-network/akash-api/go/manifest/v2beta3"; + +// StorageParams +message StorageParams { + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + string mount = 2 [ + (gogoproto.jsontag) = "mount", + (gogoproto.moretags) = "yaml:\"mount\"" + ]; + bool read_only = 3 [ + (gogoproto.jsontag) = "readOnly", + (gogoproto.moretags) = "yaml:\"readOnly\"" + ]; +} + +// ServiceParams +message ServiceParams { + repeated StorageParams storage = 1 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "storage", + (gogoproto.moretags) = "yaml:\"storage\"" + ]; + ImageCredentials credentials = 10 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "credentials,omitempty", + (gogoproto.moretags) = "yaml:\"credentials,omitempty\"" + ]; +} + +// Credentials to fetch image from registry +message ImageCredentials { + string host = 1 [ + (gogoproto.jsontag) = "host", + (gogoproto.moretags) = "yaml:\"host\"" + ]; + string email = 2 [ + (gogoproto.jsontag) = "email", + (gogoproto.moretags) = "yaml:\"email\"" + ]; + string username = 3 [ + (gogoproto.jsontag) = "username", + (gogoproto.moretags) = "yaml:\"username\"" + ]; + string password = 4 [ + (gogoproto.jsontag) = "password", + (gogoproto.moretags) = "yaml:\"password\"" + ]; +} + +// Service stores name, image, args, env, unit, count and expose list of service +message Service { + string name = 1 [ + (gogoproto.jsontag) = "name", + (gogoproto.moretags) = "yaml:\"name\"" + ]; + string image = 2 [ + (gogoproto.jsontag) = "image", + (gogoproto.moretags) = "yaml:\"image\"" + ]; + repeated string command = 3 [ + (gogoproto.jsontag) = "command", + (gogoproto.moretags) = "yaml:\"command\"" + ]; + repeated string args = 4 [ + (gogoproto.jsontag) = "args", + (gogoproto.moretags) = "yaml:\"args\"" + ]; + repeated string env = 5 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "env", + (gogoproto.moretags) = "yaml:\"env\"" + ]; + akash.base.resources.v1.Resources 
resources = 6 [ + (gogoproto.nullable) = false, + (gogoproto.jsontag) = "resources", + (gogoproto.moretags) = "yaml:\"resources\"" + ]; + uint32 count = 7 [ + (gogoproto.jsontag) = "count", + (gogoproto.moretags) = "yaml:\"count\"" + ]; + repeated ServiceExpose expose = 8 [ + (gogoproto.nullable) = false, + (gogoproto.castrepeated) = "ServiceExposes", + (gogoproto.jsontag) = "expose", + (gogoproto.moretags) = "yaml:\"expose\"" + ]; + ServiceParams params = 9 [ + (gogoproto.nullable) = true, + (gogoproto.jsontag) = "params,omitempty", + (gogoproto.moretags) = "yaml:\"params,omitempty\"" + ]; +} diff --git a/proto/provider/akash/manifest/v2beta3/serviceexpose.proto b/proto/provider/akash/manifest/v2beta3/serviceexpose.proto new file mode 100644 index 00000000..69736de5 --- /dev/null +++ b/proto/provider/akash/manifest/v2beta3/serviceexpose.proto @@ -0,0 +1,60 @@ +syntax = "proto3"; + +package akash.manifest.v2beta3; + +import "gogoproto/gogo.proto"; +import "akash/manifest/v2beta3/httpoptions.proto"; + +option (gogoproto.goproto_stringer_all) = false; +option (gogoproto.stringer_all) = true; +option go_package = "github.com/akash-network/akash-api/go/manifest/v2beta3"; + +// ServiceExpose stores exposed ports and hosts details +message ServiceExpose { + option (gogoproto.goproto_getters) = false; + + // port on the container + uint32 port = 1 [ + (gogoproto.jsontag) = "port", + (gogoproto.moretags) = "yaml:\"port\"" + ]; + // port on the service definition + uint32 external_port = 2 [ + (gogoproto.jsontag) = "externalPort", + (gogoproto.moretags) = "yaml:\"externalPort\"" + ]; + string proto = 3 [ + (gogoproto.casttype) = "ServiceProtocol", + (gogoproto.jsontag) = "proto", + (gogoproto.moretags) = "yaml:\"proto\"" + ]; + string service = 4 [ + (gogoproto.jsontag) = "service", + (gogoproto.moretags) = "yaml:\"service\"" + ]; + bool global = 5 [ + (gogoproto.jsontag) = "global", + (gogoproto.moretags) = "yaml:\"global\"" + ]; + repeated string hosts = 6 [ + 
(gogoproto.jsontag) = "hosts", + (gogoproto.moretags) = "yaml:\"hosts\"" + ]; + ServiceExposeHTTPOptions http_options = 7 [ + (gogoproto.nullable) = false, + (gogoproto.customname) = "HTTPOptions", + (gogoproto.jsontag) = "httpOptions", + (gogoproto.moretags) = "yaml:\"httpOptions\"" + ]; + // The name of the IP address associated with this, if any + string ip = 8 [ + (gogoproto.customname) = "IP", + (gogoproto.jsontag) = "ip", + (gogoproto.moretags) = "yaml:\"ip\"" + ]; + // The sequence number of the associated endpoint in the on-chain data + uint32 endpoint_sequence_number = 9 [ + (gogoproto.jsontag) = "endpointSequenceNumber", + (gogoproto.moretags) = "yaml:\"endpointSequenceNumber\"" + ]; +} diff --git a/script/shellcheck.sh b/script/shellcheck.sh index 1b560d3b..8b7fa613 100644 --- a/script/shellcheck.sh +++ b/script/shellcheck.sh @@ -2,7 +2,7 @@ unset FAILED -FILES=$(find /shellcheck/ -type f -name "*.sh" ! -path "/shellcheck/vendor/*" ! -path "/shellcheck/.git/*" ! -path "/shellcheck/ts/.husky/*") +FILES=$(find /shellcheck/ -type f -name "*.sh" ! -path "/shellcheck/vendor/*" ! -path "/shellcheck/.git/*" ! -path "/shellcheck/ts/.husky/*" ! 
-path "/shellcheck/ts/node_modules/*") for file in $FILES; do name="$(basename "$file")"; diff --git a/ts/src/generated/akash/audit/v1beta4/audit.ts b/ts/src/generated/akash/audit/v1beta4/audit.ts new file mode 100644 index 00000000..ee1a3ebf --- /dev/null +++ b/ts/src/generated/akash/audit/v1beta4/audit.ts @@ -0,0 +1,880 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Attribute } from '../../base/attributes/v1/attribute'; + +/** Provider stores owner auditor and attributes details */ +export interface Provider { + $type: 'akash.audit.v1beta4.Provider'; + owner: string; + auditor: string; + attributes: Attribute[]; +} + +/** Attributes */ +export interface AuditedAttributes { + $type: 'akash.audit.v1beta4.AuditedAttributes'; + owner: string; + auditor: string; + attributes: Attribute[]; +} + +/** AttributesResponse represents details of deployment along with group details */ +export interface AttributesResponse { + $type: 'akash.audit.v1beta4.AttributesResponse'; + attributes: AuditedAttributes[]; +} + +/** AttributesFilters defines filters used to filter deployments */ +export interface AttributesFilters { + $type: 'akash.audit.v1beta4.AttributesFilters'; + auditors: string[]; + owners: string[]; +} + +/** MsgSignProviderAttributes defines an SDK message for signing a provider attributes */ +export interface MsgSignProviderAttributes { + $type: 'akash.audit.v1beta4.MsgSignProviderAttributes'; + owner: string; + auditor: string; + attributes: Attribute[]; +} + +/** MsgSignProviderAttributesResponse defines the Msg/CreateProvider response type. 
*/ +export interface MsgSignProviderAttributesResponse { + $type: 'akash.audit.v1beta4.MsgSignProviderAttributesResponse'; +} + +/** MsgDeleteProviderAttributes defined the Msg/DeleteProviderAttributes */ +export interface MsgDeleteProviderAttributes { + $type: 'akash.audit.v1beta4.MsgDeleteProviderAttributes'; + owner: string; + auditor: string; + keys: string[]; +} + +/** MsgDeleteProviderAttributesResponse defines the Msg/ProviderAttributes response type. */ +export interface MsgDeleteProviderAttributesResponse { + $type: 'akash.audit.v1beta4.MsgDeleteProviderAttributesResponse'; +} + +function createBaseProvider(): Provider { + return { + $type: 'akash.audit.v1beta4.Provider', + owner: '', + auditor: '', + attributes: [], + }; +} + +export const Provider = { + $type: 'akash.audit.v1beta4.Provider' as const, + + encode( + message: Provider, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== '') { + writer.uint32(18).string(message.auditor); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Provider { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Provider { + return { + $type: Provider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : '', + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Provider): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.auditor !== '') { + obj.auditor = message.auditor; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Provider { + return Provider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Provider { + const message = createBaseProvider(); + message.owner = object.owner ?? ''; + message.auditor = object.auditor ?? 
''; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Provider.$type, Provider); + +function createBaseAuditedAttributes(): AuditedAttributes { + return { + $type: 'akash.audit.v1beta4.AuditedAttributes', + owner: '', + auditor: '', + attributes: [], + }; +} + +export const AuditedAttributes = { + $type: 'akash.audit.v1beta4.AuditedAttributes' as const, + + encode( + message: AuditedAttributes, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== '') { + writer.uint32(18).string(message.auditor); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AuditedAttributes { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseAuditedAttributes(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): AuditedAttributes { + return { + $type: AuditedAttributes.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : '', + attributes: globalThis.Array.isArray(object?.attributes) + ? 
object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: AuditedAttributes): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.auditor !== '') { + obj.auditor = message.auditor; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): AuditedAttributes { + return AuditedAttributes.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): AuditedAttributes { + const message = createBaseAuditedAttributes(); + message.owner = object.owner ?? ''; + message.auditor = object.auditor ?? ''; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(AuditedAttributes.$type, AuditedAttributes); + +function createBaseAttributesResponse(): AttributesResponse { + return { $type: 'akash.audit.v1beta4.AttributesResponse', attributes: [] }; +} + +export const AttributesResponse = { + $type: 'akash.audit.v1beta4.AttributesResponse' as const, + + encode( + message: AttributesResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.attributes) { + AuditedAttributes.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AttributesResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseAttributesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.attributes.push( + AuditedAttributes.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): AttributesResponse { + return { + $type: AttributesResponse.$type, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => AuditedAttributes.fromJSON(e)) + : [], + }; + }, + + toJSON(message: AttributesResponse): unknown { + const obj: any = {}; + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => + AuditedAttributes.toJSON(e), + ); + } + return obj; + }, + + create(base?: DeepPartial): AttributesResponse { + return AttributesResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): AttributesResponse { + const message = createBaseAttributesResponse(); + message.attributes = + object.attributes?.map((e) => AuditedAttributes.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(AttributesResponse.$type, AttributesResponse); + +function createBaseAttributesFilters(): AttributesFilters { + return { + $type: 'akash.audit.v1beta4.AttributesFilters', + auditors: [], + owners: [], + }; +} + +export const AttributesFilters = { + $type: 'akash.audit.v1beta4.AttributesFilters' as const, + + encode( + message: AttributesFilters, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.auditors) { + writer.uint32(10).string(v!); + } + for (const v of message.owners) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): AttributesFilters { + const reader = + input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseAttributesFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditors.push(reader.string()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.owners.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): AttributesFilters { + return { + $type: AttributesFilters.$type, + auditors: globalThis.Array.isArray(object?.auditors) + ? object.auditors.map((e: any) => globalThis.String(e)) + : [], + owners: globalThis.Array.isArray(object?.owners) + ? object.owners.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: AttributesFilters): unknown { + const obj: any = {}; + if (message.auditors?.length) { + obj.auditors = message.auditors; + } + if (message.owners?.length) { + obj.owners = message.owners; + } + return obj; + }, + + create(base?: DeepPartial): AttributesFilters { + return AttributesFilters.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): AttributesFilters { + const message = createBaseAttributesFilters(); + message.auditors = object.auditors?.map((e) => e) || []; + message.owners = object.owners?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(AttributesFilters.$type, AttributesFilters); + +function createBaseMsgSignProviderAttributes(): MsgSignProviderAttributes { + return { + $type: 'akash.audit.v1beta4.MsgSignProviderAttributes', + owner: '', + auditor: '', + attributes: [], + }; +} + +export const MsgSignProviderAttributes = { + $type: 'akash.audit.v1beta4.MsgSignProviderAttributes' as const, + + encode( + message: MsgSignProviderAttributes, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== '') { + writer.uint32(18).string(message.auditor); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgSignProviderAttributes { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgSignProviderAttributes(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgSignProviderAttributes { + return { + $type: MsgSignProviderAttributes.$type, + owner: isSet(object.owner) ? 
globalThis.String(object.owner) : '', + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : '', + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: MsgSignProviderAttributes): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.auditor !== '') { + obj.auditor = message.auditor; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create( + base?: DeepPartial, + ): MsgSignProviderAttributes { + return MsgSignProviderAttributes.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): MsgSignProviderAttributes { + const message = createBaseMsgSignProviderAttributes(); + message.owner = object.owner ?? ''; + message.auditor = object.auditor ?? ''; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set( + MsgSignProviderAttributes.$type, + MsgSignProviderAttributes, +); + +function createBaseMsgSignProviderAttributesResponse(): MsgSignProviderAttributesResponse { + return { $type: 'akash.audit.v1beta4.MsgSignProviderAttributesResponse' }; +} + +export const MsgSignProviderAttributesResponse = { + $type: 'akash.audit.v1beta4.MsgSignProviderAttributesResponse' as const, + + encode( + _: MsgSignProviderAttributesResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgSignProviderAttributesResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgSignProviderAttributesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgSignProviderAttributesResponse { + return { $type: MsgSignProviderAttributesResponse.$type }; + }, + + toJSON(_: MsgSignProviderAttributesResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgSignProviderAttributesResponse { + return MsgSignProviderAttributesResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgSignProviderAttributesResponse { + const message = createBaseMsgSignProviderAttributesResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgSignProviderAttributesResponse.$type, + MsgSignProviderAttributesResponse, +); + +function createBaseMsgDeleteProviderAttributes(): MsgDeleteProviderAttributes { + return { + $type: 'akash.audit.v1beta4.MsgDeleteProviderAttributes', + owner: '', + auditor: '', + keys: [], + }; +} + +export const MsgDeleteProviderAttributes = { + $type: 'akash.audit.v1beta4.MsgDeleteProviderAttributes' as const, + + encode( + message: MsgDeleteProviderAttributes, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.auditor !== '') { + writer.uint32(18).string(message.auditor); + } + for (const v of message.keys) { + writer.uint32(26).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgDeleteProviderAttributes { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgDeleteProviderAttributes(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.auditor = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.keys.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgDeleteProviderAttributes { + return { + $type: MsgDeleteProviderAttributes.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : '', + keys: globalThis.Array.isArray(object?.keys) + ? object.keys.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: MsgDeleteProviderAttributes): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.auditor !== '') { + obj.auditor = message.auditor; + } + if (message.keys?.length) { + obj.keys = message.keys; + } + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDeleteProviderAttributes { + return MsgDeleteProviderAttributes.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): MsgDeleteProviderAttributes { + const message = createBaseMsgDeleteProviderAttributes(); + message.owner = object.owner ?? ''; + message.auditor = object.auditor ?? 
''; + message.keys = object.keys?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + MsgDeleteProviderAttributes.$type, + MsgDeleteProviderAttributes, +); + +function createBaseMsgDeleteProviderAttributesResponse(): MsgDeleteProviderAttributesResponse { + return { $type: 'akash.audit.v1beta4.MsgDeleteProviderAttributesResponse' }; +} + +export const MsgDeleteProviderAttributesResponse = { + $type: 'akash.audit.v1beta4.MsgDeleteProviderAttributesResponse' as const, + + encode( + _: MsgDeleteProviderAttributesResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgDeleteProviderAttributesResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDeleteProviderAttributesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgDeleteProviderAttributesResponse { + return { $type: MsgDeleteProviderAttributesResponse.$type }; + }, + + toJSON(_: MsgDeleteProviderAttributesResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDeleteProviderAttributesResponse { + return MsgDeleteProviderAttributesResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgDeleteProviderAttributesResponse { + const message = createBaseMsgDeleteProviderAttributesResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgDeleteProviderAttributesResponse.$type, + MsgDeleteProviderAttributesResponse, +); + +/** Msg defines the provider Msg service */ +export interface Msg { + /** SignProviderAttributes defines a method that signs provider attributes */ + SignProviderAttributes( + request: MsgSignProviderAttributes, + ): Promise; + /** DeleteProviderAttributes defines a method that deletes provider attributes */ + DeleteProviderAttributes( + request: MsgDeleteProviderAttributes, + ): Promise; +} + +export const MsgServiceName = 'akash.audit.v1beta4.Msg'; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.SignProviderAttributes = this.SignProviderAttributes.bind(this); + this.DeleteProviderAttributes = this.DeleteProviderAttributes.bind(this); + } + SignProviderAttributes( + request: MsgSignProviderAttributes, + ): Promise { + const data = MsgSignProviderAttributes.encode(request).finish(); + const promise = this.rpc.request( + this.service, + 'SignProviderAttributes', + data, + ); + return promise.then((data) => + MsgSignProviderAttributesResponse.decode(_m0.Reader.create(data)), + ); + } + + DeleteProviderAttributes( + request: MsgDeleteProviderAttributes, + ): Promise { + const data = MsgDeleteProviderAttributes.encode(request).finish(); + const promise = this.rpc.request( + this.service, + 'DeleteProviderAttributes', + data, + ); + return promise.then((data) => + MsgDeleteProviderAttributesResponse.decode(_m0.Reader.create(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | 
Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/audit/v1beta4/genesis.ts b/ts/src/generated/akash/audit/v1beta4/genesis.ts new file mode 100644 index 00000000..7669401b --- /dev/null +++ b/ts/src/generated/akash/audit/v1beta4/genesis.ts @@ -0,0 +1,112 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { AuditedAttributes } from './audit'; + +/** GenesisState defines the basic genesis state used by audit module */ +export interface GenesisState { + $type: 'akash.audit.v1beta4.GenesisState'; + attributes: AuditedAttributes[]; +} + +function createBaseGenesisState(): GenesisState { + return { $type: 'akash.audit.v1beta4.GenesisState', attributes: [] }; +} + +export const GenesisState = { + $type: 'akash.audit.v1beta4.GenesisState' as const, + + encode( + message: GenesisState, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.attributes) { + AuditedAttributes.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.attributes.push( + AuditedAttributes.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => AuditedAttributes.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => + AuditedAttributes.toJSON(e), + ); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.attributes = + object.attributes?.map((e) => AuditedAttributes.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/ts/src/generated/akash/audit/v1beta4/query.ts b/ts/src/generated/akash/audit/v1beta4/query.ts new file mode 100644 index 00000000..434dcb42 --- /dev/null +++ b/ts/src/generated/akash/audit/v1beta4/query.ts @@ -0,0 +1,765 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { + PageRequest, + PageResponse, +} from '../../../cosmos/base/query/v1beta1/pagination'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Provider } from './audit'; + +/** QueryProvidersResponse is response type for the Query/Providers RPC method */ +export interface QueryProvidersResponse { + $type: 'akash.audit.v1beta4.QueryProvidersResponse'; + providers: Provider[]; + pagination: PageResponse | undefined; +} + +/** QueryProviderRequest is request type for the Query/Provider RPC method */ +export interface QueryProviderRequest { + $type: 'akash.audit.v1beta4.QueryProviderRequest'; + auditor: string; + owner: string; +} + +/** QueryAllProvidersAttributesRequest is request type for the Query/All Providers RPC method */ +export interface QueryAllProvidersAttributesRequest { + $type: 'akash.audit.v1beta4.QueryAllProvidersAttributesRequest'; + pagination: PageRequest | undefined; +} + +/** QueryProviderAttributesRequest is request type for the Query/Provider RPC method */ +export interface QueryProviderAttributesRequest { + $type: 'akash.audit.v1beta4.QueryProviderAttributesRequest'; + owner: string; + pagination: PageRequest | undefined; +} + +/** QueryProviderAuditorRequest is request type for the Query/Providers RPC method */ +export interface QueryProviderAuditorRequest { + $type: 'akash.audit.v1beta4.QueryProviderAuditorRequest'; + auditor: string; + owner: string; +} + +/** QueryAuditorAttributesRequest is request type for the Query/Providers RPC method */ +export interface 
QueryAuditorAttributesRequest { + $type: 'akash.audit.v1beta4.QueryAuditorAttributesRequest'; + auditor: string; + pagination: PageRequest | undefined; +} + +function createBaseQueryProvidersResponse(): QueryProvidersResponse { + return { + $type: 'akash.audit.v1beta4.QueryProvidersResponse', + providers: [], + pagination: undefined, + }; +} + +export const QueryProvidersResponse = { + $type: 'akash.audit.v1beta4.QueryProvidersResponse' as const, + + encode( + message: QueryProvidersResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.providers) { + Provider.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork(), + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProvidersResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProvidersResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.providers.push(Provider.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProvidersResponse { + return { + $type: QueryProvidersResponse.$type, + providers: globalThis.Array.isArray(object?.providers) + ? object.providers.map((e: any) => Provider.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? 
PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProvidersResponse): unknown { + const obj: any = {}; + if (message.providers?.length) { + obj.providers = message.providers.map((e) => Provider.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryProvidersResponse { + return QueryProvidersResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProvidersResponse { + const message = createBaseQueryProvidersResponse(); + message.providers = + object.providers?.map((e) => Provider.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); + +function createBaseQueryProviderRequest(): QueryProviderRequest { + return { + $type: 'akash.audit.v1beta4.QueryProviderRequest', + auditor: '', + owner: '', + }; +} + +export const QueryProviderRequest = { + $type: 'akash.audit.v1beta4.QueryProviderRequest' as const, + + encode( + message: QueryProviderRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.auditor !== '') { + writer.uint32(10).string(message.auditor); + } + if (message.owner !== '') { + writer.uint32(18).string(message.owner); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProviderRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryProviderRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditor = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderRequest { + return { + $type: QueryProviderRequest.$type, + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : '', + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + }; + }, + + toJSON(message: QueryProviderRequest): unknown { + const obj: any = {}; + if (message.auditor !== '') { + obj.auditor = message.auditor; + } + if (message.owner !== '') { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): QueryProviderRequest { + return QueryProviderRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryProviderRequest { + const message = createBaseQueryProviderRequest(); + message.auditor = object.auditor ?? ''; + message.owner = object.owner ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); + +function createBaseQueryAllProvidersAttributesRequest(): QueryAllProvidersAttributesRequest { + return { + $type: 'akash.audit.v1beta4.QueryAllProvidersAttributesRequest', + pagination: undefined, + }; +} + +export const QueryAllProvidersAttributesRequest = { + $type: 'akash.audit.v1beta4.QueryAllProvidersAttributesRequest' as const, + + encode( + message: QueryAllProvidersAttributesRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryAllProvidersAttributesRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryAllProvidersAttributesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryAllProvidersAttributesRequest { + return { + $type: QueryAllProvidersAttributesRequest.$type, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryAllProvidersAttributesRequest): unknown { + const obj: any = {}; + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryAllProvidersAttributesRequest { + return QueryAllProvidersAttributesRequest.fromPartial(base ?? 
{}); + }, + fromPartial( + object: DeepPartial, + ): QueryAllProvidersAttributesRequest { + const message = createBaseQueryAllProvidersAttributesRequest(); + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryAllProvidersAttributesRequest.$type, + QueryAllProvidersAttributesRequest, +); + +function createBaseQueryProviderAttributesRequest(): QueryProviderAttributesRequest { + return { + $type: 'akash.audit.v1beta4.QueryProviderAttributesRequest', + owner: '', + pagination: undefined, + }; +} + +export const QueryProviderAttributesRequest = { + $type: 'akash.audit.v1beta4.QueryProviderAttributesRequest' as const, + + encode( + message: QueryProviderAttributesRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProviderAttributesRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProviderAttributesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderAttributesRequest { + return { + $type: QueryProviderAttributesRequest.$type, + owner: isSet(object.owner) ? 
globalThis.String(object.owner) : '', + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProviderAttributesRequest): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryProviderAttributesRequest { + return QueryProviderAttributesRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProviderAttributesRequest { + const message = createBaseQueryProviderAttributesRequest(); + message.owner = object.owner ?? ''; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryProviderAttributesRequest.$type, + QueryProviderAttributesRequest, +); + +function createBaseQueryProviderAuditorRequest(): QueryProviderAuditorRequest { + return { + $type: 'akash.audit.v1beta4.QueryProviderAuditorRequest', + auditor: '', + owner: '', + }; +} + +export const QueryProviderAuditorRequest = { + $type: 'akash.audit.v1beta4.QueryProviderAuditorRequest' as const, + + encode( + message: QueryProviderAuditorRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.auditor !== '') { + writer.uint32(10).string(message.auditor); + } + if (message.owner !== '') { + writer.uint32(18).string(message.owner); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProviderAuditorRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryProviderAuditorRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditor = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderAuditorRequest { + return { + $type: QueryProviderAuditorRequest.$type, + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : '', + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + }; + }, + + toJSON(message: QueryProviderAuditorRequest): unknown { + const obj: any = {}; + if (message.auditor !== '') { + obj.auditor = message.auditor; + } + if (message.owner !== '') { + obj.owner = message.owner; + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryProviderAuditorRequest { + return QueryProviderAuditorRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProviderAuditorRequest { + const message = createBaseQueryProviderAuditorRequest(); + message.auditor = object.auditor ?? ''; + message.owner = object.owner ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set( + QueryProviderAuditorRequest.$type, + QueryProviderAuditorRequest, +); + +function createBaseQueryAuditorAttributesRequest(): QueryAuditorAttributesRequest { + return { + $type: 'akash.audit.v1beta4.QueryAuditorAttributesRequest', + auditor: '', + pagination: undefined, + }; +} + +export const QueryAuditorAttributesRequest = { + $type: 'akash.audit.v1beta4.QueryAuditorAttributesRequest' as const, + + encode( + message: QueryAuditorAttributesRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.auditor !== '') { + writer.uint32(10).string(message.auditor); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryAuditorAttributesRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryAuditorAttributesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.auditor = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryAuditorAttributesRequest { + return { + $type: QueryAuditorAttributesRequest.$type, + auditor: isSet(object.auditor) ? globalThis.String(object.auditor) : '', + pagination: isSet(object.pagination) + ? 
PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryAuditorAttributesRequest): unknown { + const obj: any = {}; + if (message.auditor !== '') { + obj.auditor = message.auditor; + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryAuditorAttributesRequest { + return QueryAuditorAttributesRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryAuditorAttributesRequest { + const message = createBaseQueryAuditorAttributesRequest(); + message.auditor = object.auditor ?? ''; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryAuditorAttributesRequest.$type, + QueryAuditorAttributesRequest, +); + +/** Query defines the gRPC querier service */ +export interface Query { + /** + * AllProvidersAttributes queries all providers + * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + */ + AllProvidersAttributes( + request: QueryAllProvidersAttributesRequest, + ): Promise; + /** + * ProviderAttributes queries all provider signed attributes + * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + */ + ProviderAttributes( + request: QueryProviderAttributesRequest, + ): Promise; + /** + * ProviderAuditorAttributes queries provider signed attributes by specific auditor + * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + */ + ProviderAuditorAttributes( + request: QueryProviderAuditorRequest, + ): Promise; + /** + * AuditorAttributes queries all providers signed by this auditor + * buf:lint:ignore RPC_REQUEST_RESPONSE_UNIQUE + * buf:lint:ignore RPC_RESPONSE_STANDARD_NAME + */ + AuditorAttributes( + request: 
QueryAuditorAttributesRequest, + ): Promise; +} + +export const QueryServiceName = 'akash.audit.v1beta4.Query'; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.AllProvidersAttributes = this.AllProvidersAttributes.bind(this); + this.ProviderAttributes = this.ProviderAttributes.bind(this); + this.ProviderAuditorAttributes = this.ProviderAuditorAttributes.bind(this); + this.AuditorAttributes = this.AuditorAttributes.bind(this); + } + AllProvidersAttributes( + request: QueryAllProvidersAttributesRequest, + ): Promise { + const data = QueryAllProvidersAttributesRequest.encode(request).finish(); + const promise = this.rpc.request( + this.service, + 'AllProvidersAttributes', + data, + ); + return promise.then((data) => + QueryProvidersResponse.decode(_m0.Reader.create(data)), + ); + } + + ProviderAttributes( + request: QueryProviderAttributesRequest, + ): Promise { + const data = QueryProviderAttributesRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'ProviderAttributes', data); + return promise.then((data) => + QueryProvidersResponse.decode(_m0.Reader.create(data)), + ); + } + + ProviderAuditorAttributes( + request: QueryProviderAuditorRequest, + ): Promise { + const data = QueryProviderAuditorRequest.encode(request).finish(); + const promise = this.rpc.request( + this.service, + 'ProviderAuditorAttributes', + data, + ); + return promise.then((data) => + QueryProvidersResponse.decode(_m0.Reader.create(data)), + ); + } + + AuditorAttributes( + request: QueryAuditorAttributesRequest, + ): Promise { + const data = QueryAuditorAttributesRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'AuditorAttributes', data); + return promise.then((data) => + QueryProvidersResponse.decode(_m0.Reader.create(data)), + ); + } +} + 
+interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/attributes/v1/attribute.ts b/ts/src/generated/akash/base/attributes/v1/attribute.ts new file mode 100644 index 00000000..7da84f97 --- /dev/null +++ b/ts/src/generated/akash/base/attributes/v1/attribute.ts @@ -0,0 +1,334 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; + +/** Attribute represents key value pair */ +export interface Attribute { + $type: 'akash.base.attributes.v1.Attribute'; + key: string; + value: string; +} + +/** + * SignedBy represents validation accounts that tenant expects signatures for provider attributes + * AllOf has precedence i.e. 
if there is at least one entry AnyOf is ignored regardless to how many + * entries there + * this behaviour to be discussed + */ +export interface SignedBy { + $type: 'akash.base.attributes.v1.SignedBy'; + /** all_of all keys in this list must have signed attributes */ + allOf: string[]; + /** any_of at least of of the keys from the list must have signed attributes */ + anyOf: string[]; +} + +/** PlacementRequirements */ +export interface PlacementRequirements { + $type: 'akash.base.attributes.v1.PlacementRequirements'; + /** SignedBy list of keys that tenants expect to have signatures from */ + signedBy: SignedBy | undefined; + /** Attribute list of attributes tenant expects from the provider */ + attributes: Attribute[]; +} + +function createBaseAttribute(): Attribute { + return { $type: 'akash.base.attributes.v1.Attribute', key: '', value: '' }; +} + +export const Attribute = { + $type: 'akash.base.attributes.v1.Attribute' as const, + + encode( + message: Attribute, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.key !== '') { + writer.uint32(10).string(message.key); + } + if (message.value !== '') { + writer.uint32(18).string(message.value); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Attribute { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseAttribute(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.key = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.value = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Attribute { + return { + $type: Attribute.$type, + key: isSet(object.key) ? 
globalThis.String(object.key) : '', + value: isSet(object.value) ? globalThis.String(object.value) : '', + }; + }, + + toJSON(message: Attribute): unknown { + const obj: any = {}; + if (message.key !== '') { + obj.key = message.key; + } + if (message.value !== '') { + obj.value = message.value; + } + return obj; + }, + + create(base?: DeepPartial): Attribute { + return Attribute.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Attribute { + const message = createBaseAttribute(); + message.key = object.key ?? ''; + message.value = object.value ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(Attribute.$type, Attribute); + +function createBaseSignedBy(): SignedBy { + return { $type: 'akash.base.attributes.v1.SignedBy', allOf: [], anyOf: [] }; +} + +export const SignedBy = { + $type: 'akash.base.attributes.v1.SignedBy' as const, + + encode( + message: SignedBy, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.allOf) { + writer.uint32(10).string(v!); + } + for (const v of message.anyOf) { + writer.uint32(18).string(v!); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): SignedBy { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseSignedBy(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.allOf.push(reader.string()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.anyOf.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): SignedBy { + return { + $type: SignedBy.$type, + allOf: globalThis.Array.isArray(object?.allOf) + ? 
object.allOf.map((e: any) => globalThis.String(e)) + : [], + anyOf: globalThis.Array.isArray(object?.anyOf) + ? object.anyOf.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: SignedBy): unknown { + const obj: any = {}; + if (message.allOf?.length) { + obj.allOf = message.allOf; + } + if (message.anyOf?.length) { + obj.anyOf = message.anyOf; + } + return obj; + }, + + create(base?: DeepPartial): SignedBy { + return SignedBy.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): SignedBy { + const message = createBaseSignedBy(); + message.allOf = object.allOf?.map((e) => e) || []; + message.anyOf = object.anyOf?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set(SignedBy.$type, SignedBy); + +function createBasePlacementRequirements(): PlacementRequirements { + return { + $type: 'akash.base.attributes.v1.PlacementRequirements', + signedBy: undefined, + attributes: [], + }; +} + +export const PlacementRequirements = { + $type: 'akash.base.attributes.v1.PlacementRequirements' as const, + + encode( + message: PlacementRequirements, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.signedBy !== undefined) { + SignedBy.encode(message.signedBy, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): PlacementRequirements { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBasePlacementRequirements(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.signedBy = SignedBy.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): PlacementRequirements { + return { + $type: PlacementRequirements.$type, + signedBy: isSet(object.signedBy) + ? SignedBy.fromJSON(object.signedBy) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: PlacementRequirements): unknown { + const obj: any = {}; + if (message.signedBy !== undefined) { + obj.signedBy = SignedBy.toJSON(message.signedBy); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): PlacementRequirements { + return PlacementRequirements.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): PlacementRequirements { + const message = createBasePlacementRequirements(); + message.signedBy = + object.signedBy !== undefined && object.signedBy !== null + ? SignedBy.fromPartial(object.signedBy) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(PlacementRequirements.$type, PlacementRequirements); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? 
globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/resources/v1/cpu.ts b/ts/src/generated/akash/base/resources/v1/cpu.ts new file mode 100644 index 00000000..e1d1e4b0 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1/cpu.ts @@ -0,0 +1,135 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; +import { Attribute } from '../../attributes/v1/attribute'; +import { ResourceValue } from './resourcevalue'; + +/** CPU stores resource units and cpu config attributes */ +export interface CPU { + $type: 'akash.base.resources.v1.CPU'; + units: ResourceValue | undefined; + attributes: Attribute[]; +} + +function createBaseCPU(): CPU { + return { + $type: 'akash.base.resources.v1.CPU', + units: undefined, + attributes: [], + }; +} + +export const CPU = { + $type: 'akash.base.resources.v1.CPU' as const, + + encode(message: CPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.units !== undefined) { + ResourceValue.encode(message.units, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): CPU { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseCPU(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.units = ResourceValue.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): CPU { + return { + $type: CPU.$type, + units: isSet(object.units) + ? ResourceValue.fromJSON(object.units) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: CPU): unknown { + const obj: any = {}; + if (message.units !== undefined) { + obj.units = ResourceValue.toJSON(message.units); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): CPU { + return CPU.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): CPU { + const message = createBaseCPU(); + message.units = + object.units !== undefined && object.units !== null + ? ResourceValue.fromPartial(object.units) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(CPU.$type, CPU); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/resources/v1/endpoint.ts b/ts/src/generated/akash/base/resources/v1/endpoint.ts new file mode 100644 index 00000000..6dc10379 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1/endpoint.ts @@ -0,0 +1,173 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; + +/** Endpoint describes a publicly accessible IP service */ +export interface Endpoint { + $type: 'akash.base.resources.v1.Endpoint'; + kind: Endpoint_Kind; + sequenceNumber: number; +} + +/** This describes how the endpoint is implemented when the lease is deployed */ +export enum Endpoint_Kind { + /** SHARED_HTTP - Describes an endpoint that becomes a Kubernetes Ingress */ + SHARED_HTTP = 0, + /** RANDOM_PORT - Describes an endpoint that becomes a Kubernetes NodePort */ + RANDOM_PORT = 1, + /** LEASED_IP - Describes an endpoint that becomes a leased IP */ + LEASED_IP = 2, + UNRECOGNIZED = -1, +} + +export function endpoint_KindFromJSON(object: any): Endpoint_Kind { + switch (object) { + case 0: + case 'SHARED_HTTP': + return Endpoint_Kind.SHARED_HTTP; + case 1: + case 'RANDOM_PORT': + return Endpoint_Kind.RANDOM_PORT; + case 2: + case 'LEASED_IP': + return Endpoint_Kind.LEASED_IP; + case -1: + case 'UNRECOGNIZED': + default: + return Endpoint_Kind.UNRECOGNIZED; + } +} + +export function endpoint_KindToJSON(object: Endpoint_Kind): string { + switch (object) { + case Endpoint_Kind.SHARED_HTTP: + return 'SHARED_HTTP'; + case Endpoint_Kind.RANDOM_PORT: + return 'RANDOM_PORT'; + case Endpoint_Kind.LEASED_IP: + return 'LEASED_IP'; + case Endpoint_Kind.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +function createBaseEndpoint(): 
Endpoint { + return { + $type: 'akash.base.resources.v1.Endpoint', + kind: 0, + sequenceNumber: 0, + }; +} + +export const Endpoint = { + $type: 'akash.base.resources.v1.Endpoint' as const, + + encode( + message: Endpoint, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.kind !== 0) { + writer.uint32(8).int32(message.kind); + } + if (message.sequenceNumber !== 0) { + writer.uint32(16).uint32(message.sequenceNumber); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Endpoint { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseEndpoint(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.kind = reader.int32() as any; + continue; + case 2: + if (tag !== 16) { + break; + } + + message.sequenceNumber = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Endpoint { + return { + $type: Endpoint.$type, + kind: isSet(object.kind) ? endpoint_KindFromJSON(object.kind) : 0, + sequenceNumber: isSet(object.sequenceNumber) + ? globalThis.Number(object.sequenceNumber) + : 0, + }; + }, + + toJSON(message: Endpoint): unknown { + const obj: any = {}; + if (message.kind !== 0) { + obj.kind = endpoint_KindToJSON(message.kind); + } + if (message.sequenceNumber !== 0) { + obj.sequenceNumber = Math.round(message.sequenceNumber); + } + return obj; + }, + + create(base?: DeepPartial): Endpoint { + return Endpoint.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Endpoint { + const message = createBaseEndpoint(); + message.kind = object.kind ?? 0; + message.sequenceNumber = object.sequenceNumber ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(Endpoint.$type, Endpoint); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/resources/v1/gpu.ts b/ts/src/generated/akash/base/resources/v1/gpu.ts new file mode 100644 index 00000000..ff8cf06a --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1/gpu.ts @@ -0,0 +1,135 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; +import { Attribute } from '../../attributes/v1/attribute'; +import { ResourceValue } from './resourcevalue'; + +/** GPU stores resource units and cpu config attributes */ +export interface GPU { + $type: 'akash.base.resources.v1.GPU'; + units: ResourceValue | undefined; + attributes: Attribute[]; +} + +function createBaseGPU(): GPU { + return { + $type: 'akash.base.resources.v1.GPU', + units: undefined, + attributes: [], + }; +} + +export const GPU = { + $type: 'akash.base.resources.v1.GPU' as const, + + encode(message: GPU, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.units !== undefined) { + ResourceValue.encode(message.units, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GPU { + const reader = + input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGPU(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.units = ResourceValue.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GPU { + return { + $type: GPU.$type, + units: isSet(object.units) + ? ResourceValue.fromJSON(object.units) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GPU): unknown { + const obj: any = {}; + if (message.units !== undefined) { + obj.units = ResourceValue.toJSON(message.units); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GPU { + return GPU.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GPU { + const message = createBaseGPU(); + message.units = + object.units !== undefined && object.units !== null + ? ResourceValue.fromPartial(object.units) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GPU.$type, GPU); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/resources/v1/memory.ts b/ts/src/generated/akash/base/resources/v1/memory.ts new file mode 100644 index 00000000..baec0e79 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1/memory.ts @@ -0,0 +1,138 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; +import { Attribute } from '../../attributes/v1/attribute'; +import { ResourceValue } from './resourcevalue'; + +/** Memory stores resource quantity and memory attributes */ +export interface Memory { + $type: 'akash.base.resources.v1.Memory'; + quantity: ResourceValue | undefined; + attributes: Attribute[]; +} + +function createBaseMemory(): Memory { + return { + $type: 'akash.base.resources.v1.Memory', + quantity: undefined, + attributes: [], + }; +} + +export const Memory = { + $type: 'akash.base.resources.v1.Memory' as const, + + encode( + message: Memory, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.quantity !== undefined) { + ResourceValue.encode(message.quantity, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Memory { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMemory(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.quantity = ResourceValue.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Memory { + return { + $type: Memory.$type, + quantity: isSet(object.quantity) + ? ResourceValue.fromJSON(object.quantity) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Memory): unknown { + const obj: any = {}; + if (message.quantity !== undefined) { + obj.quantity = ResourceValue.toJSON(message.quantity); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Memory { + return Memory.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Memory { + const message = createBaseMemory(); + message.quantity = + object.quantity !== undefined && object.quantity !== null + ? ResourceValue.fromPartial(object.quantity) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Memory.$type, Memory); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/resources/v1/resources.ts b/ts/src/generated/akash/base/resources/v1/resources.ts new file mode 100644 index 00000000..412a5c22 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1/resources.ts @@ -0,0 +1,218 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; +import { CPU } from './cpu'; +import { Endpoint } from './endpoint'; +import { GPU } from './gpu'; +import { Memory } from './memory'; +import { Storage } from './storage'; + +/** + * Resources describes all available resources types for deployment/node etc + * if field is nil resource is not present in the given data-structure + */ +export interface Resources { + $type: 'akash.base.resources.v1.Resources'; + id: number; + cpu: CPU | undefined; + memory: Memory | undefined; + storage: Storage[]; + gpu: GPU | undefined; + endpoints: Endpoint[]; +} + +function createBaseResources(): Resources { + return { + $type: 'akash.base.resources.v1.Resources', + id: 0, + cpu: undefined, + memory: undefined, + storage: [], + gpu: undefined, + endpoints: [], + }; +} + +export const Resources = { + $type: 'akash.base.resources.v1.Resources' as const, + + encode( + message: Resources, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== 0) { + writer.uint32(8).uint32(message.id); + } + if (message.cpu !== undefined) { + CPU.encode(message.cpu, writer.uint32(18).fork()).ldelim(); + } + if (message.memory !== undefined) { + Memory.encode(message.memory, writer.uint32(26).fork()).ldelim(); + } + for (const v of message.storage) { + Storage.encode(v!, writer.uint32(34).fork()).ldelim(); + } + if (message.gpu !== undefined) { + 
GPU.encode(message.gpu, writer.uint32(42).fork()).ldelim(); + } + for (const v of message.endpoints) { + Endpoint.encode(v!, writer.uint32(50).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Resources { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseResources(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.id = reader.uint32(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.cpu = CPU.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.memory = Memory.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.storage.push(Storage.decode(reader, reader.uint32())); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.gpu = GPU.decode(reader, reader.uint32()); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.endpoints.push(Endpoint.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Resources { + return { + $type: Resources.$type, + id: isSet(object.id) ? globalThis.Number(object.id) : 0, + cpu: isSet(object.cpu) ? CPU.fromJSON(object.cpu) : undefined, + memory: isSet(object.memory) ? Memory.fromJSON(object.memory) : undefined, + storage: globalThis.Array.isArray(object?.storage) + ? object.storage.map((e: any) => Storage.fromJSON(e)) + : [], + gpu: isSet(object.gpu) ? GPU.fromJSON(object.gpu) : undefined, + endpoints: globalThis.Array.isArray(object?.endpoints) + ? 
object.endpoints.map((e: any) => Endpoint.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Resources): unknown { + const obj: any = {}; + if (message.id !== 0) { + obj.id = Math.round(message.id); + } + if (message.cpu !== undefined) { + obj.cpu = CPU.toJSON(message.cpu); + } + if (message.memory !== undefined) { + obj.memory = Memory.toJSON(message.memory); + } + if (message.storage?.length) { + obj.storage = message.storage.map((e) => Storage.toJSON(e)); + } + if (message.gpu !== undefined) { + obj.gpu = GPU.toJSON(message.gpu); + } + if (message.endpoints?.length) { + obj.endpoints = message.endpoints.map((e) => Endpoint.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Resources { + return Resources.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Resources { + const message = createBaseResources(); + message.id = object.id ?? 0; + message.cpu = + object.cpu !== undefined && object.cpu !== null + ? CPU.fromPartial(object.cpu) + : undefined; + message.memory = + object.memory !== undefined && object.memory !== null + ? Memory.fromPartial(object.memory) + : undefined; + message.storage = object.storage?.map((e) => Storage.fromPartial(e)) || []; + message.gpu = + object.gpu !== undefined && object.gpu !== null + ? GPU.fromPartial(object.gpu) + : undefined; + message.endpoints = + object.endpoints?.map((e) => Endpoint.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Resources.$type, Resources); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/resources/v1/resourcevalue.ts b/ts/src/generated/akash/base/resources/v1/resourcevalue.ts new file mode 100644 index 00000000..a4c363bc --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1/resourcevalue.ts @@ -0,0 +1,136 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; + +/** Unit stores cpu, memory and storage metrics */ +export interface ResourceValue { + $type: 'akash.base.resources.v1.ResourceValue'; + val: Uint8Array; +} + +function createBaseResourceValue(): ResourceValue { + return { + $type: 'akash.base.resources.v1.ResourceValue', + val: new Uint8Array(0), + }; +} + +export const ResourceValue = { + $type: 'akash.base.resources.v1.ResourceValue' as const, + + encode( + message: ResourceValue, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.val.length !== 0) { + writer.uint32(10).bytes(message.val); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ResourceValue { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseResourceValue(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.val = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ResourceValue { + return { + $type: ResourceValue.$type, + val: isSet(object.val) ? 
bytesFromBase64(object.val) : new Uint8Array(0), + }; + }, + + toJSON(message: ResourceValue): unknown { + const obj: any = {}; + if (message.val.length !== 0) { + obj.val = base64FromBytes(message.val); + } + return obj; + }, + + create(base?: DeepPartial): ResourceValue { + return ResourceValue.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ResourceValue { + const message = createBaseResourceValue(); + message.val = object.val ?? new Uint8Array(0); + return message; + }, +}; + +messageTypeRegistry.set(ResourceValue.$type, ResourceValue); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, 'base64')); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString('base64'); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join('')); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/base/resources/v1/storage.ts b/ts/src/generated/akash/base/resources/v1/storage.ts new file mode 100644 index 00000000..0cbbeea1 --- /dev/null +++ b/ts/src/generated/akash/base/resources/v1/storage.ts @@ -0,0 +1,155 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../../typeRegistry'; +import { Attribute } from '../../attributes/v1/attribute'; +import { ResourceValue } from './resourcevalue'; + +/** Storage stores resource quantity and storage attributes */ +export interface Storage { + $type: 'akash.base.resources.v1.Storage'; + name: string; + quantity: ResourceValue | undefined; + attributes: Attribute[]; +} + +function createBaseStorage(): Storage { + return { + $type: 'akash.base.resources.v1.Storage', + name: '', + quantity: undefined, + attributes: [], + }; +} + +export const Storage = { + $type: 'akash.base.resources.v1.Storage' as const, + + encode( + message: Storage, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + if (message.quantity !== undefined) { + ResourceValue.encode(message.quantity, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Storage { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseStorage(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.quantity = ResourceValue.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Storage { + return { + $type: Storage.$type, + name: isSet(object.name) ? globalThis.String(object.name) : '', + quantity: isSet(object.quantity) + ? ResourceValue.fromJSON(object.quantity) + : undefined, + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Storage): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.quantity !== undefined) { + obj.quantity = ResourceValue.toJSON(message.quantity); + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Storage { + return Storage.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Storage { + const message = createBaseStorage(); + message.name = object.name ?? ''; + message.quantity = + object.quantity !== undefined && object.quantity !== null + ? 
ResourceValue.fromPartial(object.quantity) + : undefined; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Storage.$type, Storage); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/authz.ts b/ts/src/generated/akash/deployment/v1beta4/authz.ts new file mode 100644 index 00000000..65a0952b --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/authz.ts @@ -0,0 +1,134 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { Coin } from '../../../cosmos/base/v1beta1/coin'; +import { messageTypeRegistry } from '../../../typeRegistry'; + +/** + * DepositDeploymentAuthorization allows the grantee to deposit up to spend_limit coins from + * the granter's account for a deployment. + */ +export interface DepositDeploymentAuthorization { + $type: 'akash.deployment.v1beta4.DepositDeploymentAuthorization'; + /** + * SpendLimit is the amount the grantee is authorized to spend from the granter's account for + * the purpose of deployment. 
+ */ + spendLimit: Coin | undefined; +} + +function createBaseDepositDeploymentAuthorization(): DepositDeploymentAuthorization { + return { + $type: 'akash.deployment.v1beta4.DepositDeploymentAuthorization', + spendLimit: undefined, + }; +} + +export const DepositDeploymentAuthorization = { + $type: 'akash.deployment.v1beta4.DepositDeploymentAuthorization' as const, + + encode( + message: DepositDeploymentAuthorization, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.spendLimit !== undefined) { + Coin.encode(message.spendLimit, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): DepositDeploymentAuthorization { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDepositDeploymentAuthorization(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.spendLimit = Coin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): DepositDeploymentAuthorization { + return { + $type: DepositDeploymentAuthorization.$type, + spendLimit: isSet(object.spendLimit) + ? Coin.fromJSON(object.spendLimit) + : undefined, + }; + }, + + toJSON(message: DepositDeploymentAuthorization): unknown { + const obj: any = {}; + if (message.spendLimit !== undefined) { + obj.spendLimit = Coin.toJSON(message.spendLimit); + } + return obj; + }, + + create( + base?: DeepPartial, + ): DepositDeploymentAuthorization { + return DepositDeploymentAuthorization.fromPartial(base ?? 
{}); + }, + fromPartial( + object: DeepPartial, + ): DepositDeploymentAuthorization { + const message = createBaseDepositDeploymentAuthorization(); + message.spendLimit = + object.spendLimit !== undefined && object.spendLimit !== null + ? Coin.fromPartial(object.spendLimit) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + DepositDeploymentAuthorization.$type, + DepositDeploymentAuthorization, +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/deployment.ts b/ts/src/generated/akash/deployment/v1beta4/deployment.ts new file mode 100644 index 00000000..18d23cea --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/deployment.ts @@ -0,0 +1,456 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; + +/** DeploymentID stores owner and sequence number */ +export interface DeploymentID { + $type: 'akash.deployment.v1beta4.DeploymentID'; + owner: string; + dseq: Long; +} + +/** Deployment stores deploymentID, state and version details */ +export interface Deployment { + $type: 'akash.deployment.v1beta4.Deployment'; + deploymentId: DeploymentID | undefined; + state: Deployment_State; + version: Uint8Array; + createdAt: Long; +} + +/** State is an enum which refers to state of deployment */ +export enum Deployment_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** active - DeploymentActive denotes state for deployment active */ + active = 1, + /** closed - DeploymentClosed denotes state for deployment closed */ + closed = 2, + UNRECOGNIZED = -1, +} + +export function deployment_StateFromJSON(object: any): Deployment_State { + switch (object) { + case 0: + case 'invalid': + return Deployment_State.invalid; + case 1: + case 'active': + return Deployment_State.active; + case 2: + case 'closed': + return Deployment_State.closed; + case -1: + case 'UNRECOGNIZED': + default: + return Deployment_State.UNRECOGNIZED; + } +} + +export function deployment_StateToJSON(object: Deployment_State): string { + switch (object) { + case Deployment_State.invalid: + return 'invalid'; + case Deployment_State.active: + return 'active'; + case Deployment_State.closed: + return 'closed'; + case Deployment_State.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +/** DeploymentFilters defines filters used to filter deployments */ +export interface DeploymentFilters { + $type: 'akash.deployment.v1beta4.DeploymentFilters'; + owner: string; + dseq: Long; + state: string; +} + +function createBaseDeploymentID(): DeploymentID { + return { + $type: 'akash.deployment.v1beta4.DeploymentID', + owner: '', + dseq: Long.UZERO, + }; +} + +export const DeploymentID = { + $type: 'akash.deployment.v1beta4.DeploymentID' as const, + + encode( + message: DeploymentID, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentID { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseDeploymentID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): DeploymentID { + return { + $type: DeploymentID.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + }; + }, + + toJSON(message: DeploymentID): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): DeploymentID { + return DeploymentID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DeploymentID { + const message = createBaseDeploymentID(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? 
Long.fromValue(object.dseq) + : Long.UZERO; + return message; + }, +}; + +messageTypeRegistry.set(DeploymentID.$type, DeploymentID); + +function createBaseDeployment(): Deployment { + return { + $type: 'akash.deployment.v1beta4.Deployment', + deploymentId: undefined, + state: 0, + version: new Uint8Array(0), + createdAt: Long.ZERO, + }; +} + +export const Deployment = { + $type: 'akash.deployment.v1beta4.Deployment' as const, + + encode( + message: Deployment, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.deploymentId !== undefined) { + DeploymentID.encode( + message.deploymentId, + writer.uint32(10).fork(), + ).ldelim(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.version.length !== 0) { + writer.uint32(26).bytes(message.version); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Deployment { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deploymentId = DeploymentID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.version = reader.bytes(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = reader.int64() as Long; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Deployment { + return { + $type: Deployment.$type, + deploymentId: isSet(object.deploymentId) + ? 
DeploymentID.fromJSON(object.deploymentId) + : undefined, + state: isSet(object.state) ? deployment_StateFromJSON(object.state) : 0, + version: isSet(object.version) + ? bytesFromBase64(object.version) + : new Uint8Array(0), + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + }; + }, + + toJSON(message: Deployment): unknown { + const obj: any = {}; + if (message.deploymentId !== undefined) { + obj.deploymentId = DeploymentID.toJSON(message.deploymentId); + } + if (message.state !== 0) { + obj.state = deployment_StateToJSON(message.state); + } + if (message.version.length !== 0) { + obj.version = base64FromBytes(message.version); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Deployment { + return Deployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Deployment { + const message = createBaseDeployment(); + message.deploymentId = + object.deploymentId !== undefined && object.deploymentId !== null + ? DeploymentID.fromPartial(object.deploymentId) + : undefined; + message.state = object.state ?? 0; + message.version = object.version ?? new Uint8Array(0); + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? 
Long.fromValue(object.createdAt) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Deployment.$type, Deployment); + +function createBaseDeploymentFilters(): DeploymentFilters { + return { + $type: 'akash.deployment.v1beta4.DeploymentFilters', + owner: '', + dseq: Long.UZERO, + state: '', + }; +} + +export const DeploymentFilters = { + $type: 'akash.deployment.v1beta4.DeploymentFilters' as const, + + encode( + message: DeploymentFilters, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.state !== '') { + writer.uint32(26).string(message.state); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): DeploymentFilters { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseDeploymentFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): DeploymentFilters { + return { + $type: DeploymentFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + state: isSet(object.state) ? 
globalThis.String(object.state) : '', + }; + }, + + toJSON(message: DeploymentFilters): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.state !== '') { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): DeploymentFilters { + return DeploymentFilters.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): DeploymentFilters { + const message = createBaseDeploymentFilters(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.state = object.state ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(DeploymentFilters.$type, DeploymentFilters); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, 'base64')); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString('base64'); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join('')); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/deploymentmsg.ts b/ts/src/generated/akash/deployment/v1beta4/deploymentmsg.ts new file mode 100644 index 00000000..20945d0c --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/deploymentmsg.ts @@ -0,0 +1,788 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { Coin } from '../../../cosmos/base/v1beta1/coin'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { DeploymentID } from './deployment'; +import { GroupSpec } from './groupspec'; + +/** MsgCreateDeployment defines an SDK message for creating deployment */ +export interface MsgCreateDeployment { + $type: 'akash.deployment.v1beta4.MsgCreateDeployment'; + id: DeploymentID | undefined; + groups: GroupSpec[]; + version: Uint8Array; + deposit: Coin | undefined; + /** Depositor pays for the deposit */ + depositor: string; +} + +/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. */ +export interface MsgCreateDeploymentResponse { + $type: 'akash.deployment.v1beta4.MsgCreateDeploymentResponse'; +} + +/** MsgDepositDeployment deposits more funds into the deposit account */ +export interface MsgDepositDeployment { + $type: 'akash.deployment.v1beta4.MsgDepositDeployment'; + id: DeploymentID | undefined; + amount: Coin | undefined; + /** Depositor pays for the deposit */ + depositor: string; +} + +/** MsgCreateDeploymentResponse defines the Msg/CreateDeployment response type. 
*/ +export interface MsgDepositDeploymentResponse { + $type: 'akash.deployment.v1beta4.MsgDepositDeploymentResponse'; +} + +/** MsgUpdateDeployment defines an SDK message for updating deployment */ +export interface MsgUpdateDeployment { + $type: 'akash.deployment.v1beta4.MsgUpdateDeployment'; + id: DeploymentID | undefined; + version: Uint8Array; +} + +/** MsgUpdateDeploymentResponse defines the Msg/UpdateDeployment response type. */ +export interface MsgUpdateDeploymentResponse { + $type: 'akash.deployment.v1beta4.MsgUpdateDeploymentResponse'; +} + +/** MsgCloseDeployment defines an SDK message for closing deployment */ +export interface MsgCloseDeployment { + $type: 'akash.deployment.v1beta4.MsgCloseDeployment'; + id: DeploymentID | undefined; +} + +/** MsgCloseDeploymentResponse defines the Msg/CloseDeployment response type. */ +export interface MsgCloseDeploymentResponse { + $type: 'akash.deployment.v1beta4.MsgCloseDeploymentResponse'; +} + +function createBaseMsgCreateDeployment(): MsgCreateDeployment { + return { + $type: 'akash.deployment.v1beta4.MsgCreateDeployment', + id: undefined, + groups: [], + version: new Uint8Array(0), + deposit: undefined, + depositor: '', + }; +} + +export const MsgCreateDeployment = { + $type: 'akash.deployment.v1beta4.MsgCreateDeployment' as const, + + encode( + message: MsgCreateDeployment, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.groups) { + GroupSpec.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.version.length !== 0) { + writer.uint32(26).bytes(message.version); + } + if (message.deposit !== undefined) { + Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); + } + if (message.depositor !== '') { + writer.uint32(42).string(message.depositor); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): 
MsgCreateDeployment { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.groups.push(GroupSpec.decode(reader, reader.uint32())); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.version = reader.bytes(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.deposit = Coin.decode(reader, reader.uint32()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.depositor = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateDeployment { + return { + $type: MsgCreateDeployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + groups: globalThis.Array.isArray(object?.groups) + ? object.groups.map((e: any) => GroupSpec.fromJSON(e)) + : [], + version: isSet(object.version) + ? bytesFromBase64(object.version) + : new Uint8Array(0), + deposit: isSet(object.deposit) + ? Coin.fromJSON(object.deposit) + : undefined, + depositor: isSet(object.depositor) + ? 
globalThis.String(object.depositor) + : '', + }; + }, + + toJSON(message: MsgCreateDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.groups?.length) { + obj.groups = message.groups.map((e) => GroupSpec.toJSON(e)); + } + if (message.version.length !== 0) { + obj.version = base64FromBytes(message.version); + } + if (message.deposit !== undefined) { + obj.deposit = Coin.toJSON(message.deposit); + } + if (message.depositor !== '') { + obj.depositor = message.depositor; + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateDeployment { + return MsgCreateDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCreateDeployment { + const message = createBaseMsgCreateDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.groups = object.groups?.map((e) => GroupSpec.fromPartial(e)) || []; + message.version = object.version ?? new Uint8Array(0); + message.deposit = + object.deposit !== undefined && object.deposit !== null + ? Coin.fromPartial(object.deposit) + : undefined; + message.depositor = object.depositor ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateDeployment.$type, MsgCreateDeployment); + +function createBaseMsgCreateDeploymentResponse(): MsgCreateDeploymentResponse { + return { $type: 'akash.deployment.v1beta4.MsgCreateDeploymentResponse' }; +} + +export const MsgCreateDeploymentResponse = { + $type: 'akash.deployment.v1beta4.MsgCreateDeploymentResponse' as const, + + encode( + _: MsgCreateDeploymentResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgCreateDeploymentResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCreateDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateDeploymentResponse { + return { $type: MsgCreateDeploymentResponse.$type }; + }, + + toJSON(_: MsgCreateDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgCreateDeploymentResponse { + return MsgCreateDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgCreateDeploymentResponse { + const message = createBaseMsgCreateDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgCreateDeploymentResponse.$type, + MsgCreateDeploymentResponse, +); + +function createBaseMsgDepositDeployment(): MsgDepositDeployment { + return { + $type: 'akash.deployment.v1beta4.MsgDepositDeployment', + id: undefined, + amount: undefined, + depositor: '', + }; +} + +export const MsgDepositDeployment = { + $type: 'akash.deployment.v1beta4.MsgDepositDeployment' as const, + + encode( + message: MsgDepositDeployment, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + if (message.amount !== undefined) { + Coin.encode(message.amount, writer.uint32(18).fork()).ldelim(); + } + if (message.depositor !== '') { + writer.uint32(26).string(message.depositor); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgDepositDeployment { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgDepositDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.amount = Coin.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.depositor = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgDepositDeployment { + return { + $type: MsgDepositDeployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + amount: isSet(object.amount) ? Coin.fromJSON(object.amount) : undefined, + depositor: isSet(object.depositor) + ? globalThis.String(object.depositor) + : '', + }; + }, + + toJSON(message: MsgDepositDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.amount !== undefined) { + obj.amount = Coin.toJSON(message.amount); + } + if (message.depositor !== '') { + obj.depositor = message.depositor; + } + return obj; + }, + + create(base?: DeepPartial): MsgDepositDeployment { + return MsgDepositDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgDepositDeployment { + const message = createBaseMsgDepositDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.amount = + object.amount !== undefined && object.amount !== null + ? Coin.fromPartial(object.amount) + : undefined; + message.depositor = object.depositor ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set(MsgDepositDeployment.$type, MsgDepositDeployment); + +function createBaseMsgDepositDeploymentResponse(): MsgDepositDeploymentResponse { + return { $type: 'akash.deployment.v1beta4.MsgDepositDeploymentResponse' }; +} + +export const MsgDepositDeploymentResponse = { + $type: 'akash.deployment.v1beta4.MsgDepositDeploymentResponse' as const, + + encode( + _: MsgDepositDeploymentResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgDepositDeploymentResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDepositDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgDepositDeploymentResponse { + return { $type: MsgDepositDeploymentResponse.$type }; + }, + + toJSON(_: MsgDepositDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDepositDeploymentResponse { + return MsgDepositDeploymentResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgDepositDeploymentResponse { + const message = createBaseMsgDepositDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgDepositDeploymentResponse.$type, + MsgDepositDeploymentResponse, +); + +function createBaseMsgUpdateDeployment(): MsgUpdateDeployment { + return { + $type: 'akash.deployment.v1beta4.MsgUpdateDeployment', + id: undefined, + version: new Uint8Array(0), + }; +} + +export const MsgUpdateDeployment = { + $type: 'akash.deployment.v1beta4.MsgUpdateDeployment' as const, + + encode( + message: MsgUpdateDeployment, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + if (message.version.length !== 0) { + writer.uint32(26).bytes(message.version); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateDeployment { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.version = reader.bytes(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateDeployment { + return { + $type: MsgUpdateDeployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + version: isSet(object.version) + ? 
bytesFromBase64(object.version) + : new Uint8Array(0), + }; + }, + + toJSON(message: MsgUpdateDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + if (message.version.length !== 0) { + obj.version = base64FromBytes(message.version); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateDeployment { + return MsgUpdateDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateDeployment { + const message = createBaseMsgUpdateDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + message.version = object.version ?? new Uint8Array(0); + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateDeployment.$type, MsgUpdateDeployment); + +function createBaseMsgUpdateDeploymentResponse(): MsgUpdateDeploymentResponse { + return { $type: 'akash.deployment.v1beta4.MsgUpdateDeploymentResponse' }; +} + +export const MsgUpdateDeploymentResponse = { + $type: 'akash.deployment.v1beta4.MsgUpdateDeploymentResponse' as const, + + encode( + _: MsgUpdateDeploymentResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgUpdateDeploymentResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgUpdateDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgUpdateDeploymentResponse { + return { $type: MsgUpdateDeploymentResponse.$type }; + }, + + toJSON(_: MsgUpdateDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgUpdateDeploymentResponse { + return MsgUpdateDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgUpdateDeploymentResponse { + const message = createBaseMsgUpdateDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgUpdateDeploymentResponse.$type, + MsgUpdateDeploymentResponse, +); + +function createBaseMsgCloseDeployment(): MsgCloseDeployment { + return { + $type: 'akash.deployment.v1beta4.MsgCloseDeployment', + id: undefined, + }; +} + +export const MsgCloseDeployment = { + $type: 'akash.deployment.v1beta4.MsgCloseDeployment' as const, + + encode( + message: MsgCloseDeployment, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseDeployment { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCloseDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseDeployment { + return { + $type: MsgCloseDeployment.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgCloseDeployment): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseDeployment { + return MsgCloseDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCloseDeployment { + const message = createBaseMsgCloseDeployment(); + message.id = + object.id !== undefined && object.id !== null + ? DeploymentID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseDeployment.$type, MsgCloseDeployment); + +function createBaseMsgCloseDeploymentResponse(): MsgCloseDeploymentResponse { + return { $type: 'akash.deployment.v1beta4.MsgCloseDeploymentResponse' }; +} + +export const MsgCloseDeploymentResponse = { + $type: 'akash.deployment.v1beta4.MsgCloseDeploymentResponse' as const, + + encode( + _: MsgCloseDeploymentResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgCloseDeploymentResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCloseDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseDeploymentResponse { + return { $type: MsgCloseDeploymentResponse.$type }; + }, + + toJSON(_: MsgCloseDeploymentResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgCloseDeploymentResponse { + return MsgCloseDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgCloseDeploymentResponse { + const message = createBaseMsgCloseDeploymentResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgCloseDeploymentResponse.$type, + MsgCloseDeploymentResponse, +); + +function bytesFromBase64(b64: string): Uint8Array { + if ((globalThis as any).Buffer) { + return Uint8Array.from(globalThis.Buffer.from(b64, 'base64')); + } else { + const bin = globalThis.atob(b64); + const arr = new Uint8Array(bin.length); + for (let i = 0; i < bin.length; ++i) { + arr[i] = bin.charCodeAt(i); + } + return arr; + } +} + +function base64FromBytes(arr: Uint8Array): string { + if ((globalThis as any).Buffer) { + return globalThis.Buffer.from(arr).toString('base64'); + } else { + const bin: string[] = []; + arr.forEach((byte) => { + bin.push(globalThis.String.fromCharCode(byte)); + }); + return globalThis.btoa(bin.join('')); + } +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/genesis.ts b/ts/src/generated/akash/deployment/v1beta4/genesis.ts new file mode 100644 index 00000000..c9dc9e40 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/genesis.ts @@ -0,0 +1,242 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Deployment } from './deployment'; +import { Group } from './group'; +import { Params } from './params'; + +/** GenesisDeployment defines the basic genesis state used by deployment module */ +export interface GenesisDeployment { + $type: 'akash.deployment.v1beta4.GenesisDeployment'; + deployment: Deployment | undefined; + groups: Group[]; +} + +/** GenesisState stores slice of genesis deployment instance */ +export interface GenesisState { + $type: 'akash.deployment.v1beta4.GenesisState'; + deployments: GenesisDeployment[]; + params: Params | undefined; +} + +function createBaseGenesisDeployment(): GenesisDeployment { + return { + $type: 'akash.deployment.v1beta4.GenesisDeployment', + deployment: undefined, + groups: [], + }; +} + +export const GenesisDeployment = { + $type: 'akash.deployment.v1beta4.GenesisDeployment' as const, + + encode( + message: GenesisDeployment, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.deployment !== undefined) { + Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.groups) { + Group.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GenesisDeployment { + const reader = + input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisDeployment(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployment = Deployment.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.groups.push(Group.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisDeployment { + return { + $type: GenesisDeployment.$type, + deployment: isSet(object.deployment) + ? Deployment.fromJSON(object.deployment) + : undefined, + groups: globalThis.Array.isArray(object?.groups) + ? object.groups.map((e: any) => Group.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisDeployment): unknown { + const obj: any = {}; + if (message.deployment !== undefined) { + obj.deployment = Deployment.toJSON(message.deployment); + } + if (message.groups?.length) { + obj.groups = message.groups.map((e) => Group.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisDeployment { + return GenesisDeployment.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisDeployment { + const message = createBaseGenesisDeployment(); + message.deployment = + object.deployment !== undefined && object.deployment !== null + ? 
Deployment.fromPartial(object.deployment) + : undefined; + message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisDeployment.$type, GenesisDeployment); + +function createBaseGenesisState(): GenesisState { + return { + $type: 'akash.deployment.v1beta4.GenesisState', + deployments: [], + params: undefined, + }; +} + +export const GenesisState = { + $type: 'akash.deployment.v1beta4.GenesisState' as const, + + encode( + message: GenesisState, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.deployments) { + GenesisDeployment.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployments.push( + GenesisDeployment.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + deployments: globalThis.Array.isArray(object?.deployments) + ? object.deployments.map((e: any) => GenesisDeployment.fromJSON(e)) + : [], + params: isSet(object.params) ? 
Params.fromJSON(object.params) : undefined, + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.deployments?.length) { + obj.deployments = message.deployments.map((e) => + GenesisDeployment.toJSON(e), + ); + } + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.deployments = + object.deployments?.map((e) => GenesisDeployment.fromPartial(e)) || []; + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/group.ts b/ts/src/generated/akash/deployment/v1beta4/group.ts new file mode 100644 index 00000000..015f2a46 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/group.ts @@ -0,0 +1,233 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { GroupID } from './groupid'; +import { GroupSpec } from './groupspec'; + +/** Group stores group id, state and specifications of group */ +export interface Group { + $type: 'akash.deployment.v1beta4.Group'; + groupId: GroupID | undefined; + state: Group_State; + groupSpec: GroupSpec | undefined; + createdAt: Long; +} + +/** State is an enum which refers to state of group */ +export enum Group_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** open - GroupOpen denotes state for group open */ + open = 1, + /** paused - GroupOrdered denotes state for group ordered */ + paused = 2, + /** insufficient_funds - GroupInsufficientFunds denotes state for group insufficient_funds */ + insufficient_funds = 3, + /** closed - GroupClosed denotes state for group closed */ + closed = 4, + UNRECOGNIZED = -1, +} + +export function group_StateFromJSON(object: any): Group_State { + switch (object) { + case 0: + case 'invalid': + return Group_State.invalid; + case 1: + case 'open': + return Group_State.open; + case 2: + case 'paused': + return Group_State.paused; + case 3: + case 'insufficient_funds': + return Group_State.insufficient_funds; + case 4: + case 'closed': + return Group_State.closed; + case -1: + case 'UNRECOGNIZED': + default: + return Group_State.UNRECOGNIZED; + } +} + +export function group_StateToJSON(object: Group_State): string { + switch (object) { + case Group_State.invalid: + return 'invalid'; + case Group_State.open: + return 'open'; + case Group_State.paused: + return 'paused'; + case Group_State.insufficient_funds: + return 'insufficient_funds'; + case Group_State.closed: + return 'closed'; + case Group_State.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +function createBaseGroup(): Group { + return { + $type: 'akash.deployment.v1beta4.Group', + groupId: undefined, + state: 0, + groupSpec: undefined, + createdAt: Long.ZERO, + }; +} + +export const Group = { + $type: 'akash.deployment.v1beta4.Group' as const, + + encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.groupId !== undefined) { + GroupID.encode(message.groupId, writer.uint32(10).fork()).ldelim(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.groupSpec !== undefined) { + GroupSpec.encode(message.groupSpec, writer.uint32(26).fork()).ldelim(); + } + if (!message.createdAt.equals(Long.ZERO)) { + 
writer.uint32(32).int64(message.createdAt); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Group { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.groupId = GroupID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.groupSpec = GroupSpec.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = reader.int64() as Long; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Group { + return { + $type: Group.$type, + groupId: isSet(object.groupId) + ? GroupID.fromJSON(object.groupId) + : undefined, + state: isSet(object.state) ? group_StateFromJSON(object.state) : 0, + groupSpec: isSet(object.groupSpec) + ? GroupSpec.fromJSON(object.groupSpec) + : undefined, + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + }; + }, + + toJSON(message: Group): unknown { + const obj: any = {}; + if (message.groupId !== undefined) { + obj.groupId = GroupID.toJSON(message.groupId); + } + if (message.state !== 0) { + obj.state = group_StateToJSON(message.state); + } + if (message.groupSpec !== undefined) { + obj.groupSpec = GroupSpec.toJSON(message.groupSpec); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Group { + return Group.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): Group { + const message = createBaseGroup(); + message.groupId = + object.groupId !== undefined && object.groupId !== null + ? GroupID.fromPartial(object.groupId) + : undefined; + message.state = object.state ?? 0; + message.groupSpec = + object.groupSpec !== undefined && object.groupSpec !== null + ? GroupSpec.fromPartial(object.groupSpec) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? Long.fromValue(object.createdAt) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Group.$type, Group); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/groupid.ts b/ts/src/generated/akash/deployment/v1beta4/groupid.ts new file mode 100644 index 00000000..7f165c9c --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/groupid.ts @@ -0,0 +1,148 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; + +/** GroupID stores owner, deployment sequence number and group sequence number */ +export interface GroupID { + $type: 'akash.deployment.v1beta4.GroupID'; + owner: string; + dseq: Long; + gseq: number; +} + +function createBaseGroupID(): GroupID { + return { + $type: 'akash.deployment.v1beta4.GroupID', + owner: '', + dseq: Long.UZERO, + gseq: 0, + }; +} + +export const GroupID = { + $type: 'akash.deployment.v1beta4.GroupID' as 
const, + + encode( + message: GroupID, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GroupID { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGroupID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GroupID { + return { + $type: GroupID.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + }; + }, + + toJSON(message: GroupID): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + return obj; + }, + + create(base?: DeepPartial): GroupID { + return GroupID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GroupID { + const message = createBaseGroupID(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? 
Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(GroupID.$type, GroupID); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/groupmsg.ts b/ts/src/generated/akash/deployment/v1beta4/groupmsg.ts new file mode 100644 index 00000000..488d2ad4 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/groupmsg.ts @@ -0,0 +1,443 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { GroupID } from './groupid'; + +/** MsgCloseGroup defines SDK message to close a single Group within a Deployment. */ +export interface MsgCloseGroup { + $type: 'akash.deployment.v1beta4.MsgCloseGroup'; + id: GroupID | undefined; +} + +/** MsgCloseGroupResponse defines the Msg/CloseGroup response type. */ +export interface MsgCloseGroupResponse { + $type: 'akash.deployment.v1beta4.MsgCloseGroupResponse'; +} + +/** MsgPauseGroup defines SDK message to close a single Group within a Deployment. */ +export interface MsgPauseGroup { + $type: 'akash.deployment.v1beta4.MsgPauseGroup'; + id: GroupID | undefined; +} + +/** MsgPauseGroupResponse defines the Msg/PauseGroup response type. */ +export interface MsgPauseGroupResponse { + $type: 'akash.deployment.v1beta4.MsgPauseGroupResponse'; +} + +/** MsgStartGroup defines SDK message to close a single Group within a Deployment. 
*/ +export interface MsgStartGroup { + $type: 'akash.deployment.v1beta4.MsgStartGroup'; + id: GroupID | undefined; +} + +/** MsgStartGroupResponse defines the Msg/StartGroup response type. */ +export interface MsgStartGroupResponse { + $type: 'akash.deployment.v1beta4.MsgStartGroupResponse'; +} + +function createBaseMsgCloseGroup(): MsgCloseGroup { + return { $type: 'akash.deployment.v1beta4.MsgCloseGroup', id: undefined }; +} + +export const MsgCloseGroup = { + $type: 'akash.deployment.v1beta4.MsgCloseGroup' as const, + + encode( + message: MsgCloseGroup, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseGroup { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseGroup { + return { + $type: MsgCloseGroup.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgCloseGroup): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseGroup { + return MsgCloseGroup.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCloseGroup { + const message = createBaseMsgCloseGroup(); + message.id = + object.id !== undefined && object.id !== null + ? 
GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseGroup.$type, MsgCloseGroup); + +function createBaseMsgCloseGroupResponse(): MsgCloseGroupResponse { + return { $type: 'akash.deployment.v1beta4.MsgCloseGroupResponse' }; +} + +export const MsgCloseGroupResponse = { + $type: 'akash.deployment.v1beta4.MsgCloseGroupResponse' as const, + + encode( + _: MsgCloseGroupResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgCloseGroupResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseGroupResponse { + return { $type: MsgCloseGroupResponse.$type }; + }, + + toJSON(_: MsgCloseGroupResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCloseGroupResponse { + return MsgCloseGroupResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgCloseGroupResponse { + const message = createBaseMsgCloseGroupResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseGroupResponse.$type, MsgCloseGroupResponse); + +function createBaseMsgPauseGroup(): MsgPauseGroup { + return { $type: 'akash.deployment.v1beta4.MsgPauseGroup', id: undefined }; +} + +export const MsgPauseGroup = { + $type: 'akash.deployment.v1beta4.MsgPauseGroup' as const, + + encode( + message: MsgPauseGroup, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgPauseGroup { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgPauseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgPauseGroup { + return { + $type: MsgPauseGroup.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgPauseGroup): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgPauseGroup { + return MsgPauseGroup.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgPauseGroup { + const message = createBaseMsgPauseGroup(); + message.id = + object.id !== undefined && object.id !== null + ? 
GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgPauseGroup.$type, MsgPauseGroup); + +function createBaseMsgPauseGroupResponse(): MsgPauseGroupResponse { + return { $type: 'akash.deployment.v1beta4.MsgPauseGroupResponse' }; +} + +export const MsgPauseGroupResponse = { + $type: 'akash.deployment.v1beta4.MsgPauseGroupResponse' as const, + + encode( + _: MsgPauseGroupResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgPauseGroupResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgPauseGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgPauseGroupResponse { + return { $type: MsgPauseGroupResponse.$type }; + }, + + toJSON(_: MsgPauseGroupResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgPauseGroupResponse { + return MsgPauseGroupResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgPauseGroupResponse { + const message = createBaseMsgPauseGroupResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgPauseGroupResponse.$type, MsgPauseGroupResponse); + +function createBaseMsgStartGroup(): MsgStartGroup { + return { $type: 'akash.deployment.v1beta4.MsgStartGroup', id: undefined }; +} + +export const MsgStartGroup = { + $type: 'akash.deployment.v1beta4.MsgStartGroup' as const, + + encode( + message: MsgStartGroup, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgStartGroup { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgStartGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgStartGroup { + return { + $type: MsgStartGroup.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: MsgStartGroup): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): MsgStartGroup { + return MsgStartGroup.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgStartGroup { + const message = createBaseMsgStartGroup(); + message.id = + object.id !== undefined && object.id !== null + ? 
GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgStartGroup.$type, MsgStartGroup); + +function createBaseMsgStartGroupResponse(): MsgStartGroupResponse { + return { $type: 'akash.deployment.v1beta4.MsgStartGroupResponse' }; +} + +export const MsgStartGroupResponse = { + $type: 'akash.deployment.v1beta4.MsgStartGroupResponse' as const, + + encode( + _: MsgStartGroupResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgStartGroupResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgStartGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgStartGroupResponse { + return { $type: MsgStartGroupResponse.$type }; + }, + + toJSON(_: MsgStartGroupResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgStartGroupResponse { + return MsgStartGroupResponse.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): MsgStartGroupResponse { + const message = createBaseMsgStartGroupResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgStartGroupResponse.$type, MsgStartGroupResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/groupspec.ts b/ts/src/generated/akash/deployment/v1beta4/groupspec.ts new file mode 100644 index 00000000..c6ce6dde --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/groupspec.ts @@ -0,0 +1,161 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { PlacementRequirements } from '../../base/attributes/v1/attribute'; +import { ResourceUnit } from './resourceunit'; + +/** GroupSpec stores group specifications */ +export interface GroupSpec { + $type: 'akash.deployment.v1beta4.GroupSpec'; + name: string; + requirements: PlacementRequirements | undefined; + resources: ResourceUnit[]; +} + +function createBaseGroupSpec(): GroupSpec { + return { + $type: 'akash.deployment.v1beta4.GroupSpec', + name: '', + requirements: undefined, + resources: [], + }; +} + +export const GroupSpec = { + $type: 'akash.deployment.v1beta4.GroupSpec' as const, + + encode( + message: GroupSpec, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + if (message.requirements !== undefined) { + PlacementRequirements.encode( + message.requirements, + writer.uint32(18).fork(), + ).ldelim(); + } + for (const v of message.resources) { + ResourceUnit.encode(v!, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GroupSpec { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseGroupSpec(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.requirements = PlacementRequirements.decode( + reader, + reader.uint32(), + ); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.resources.push(ResourceUnit.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GroupSpec { + return { + $type: GroupSpec.$type, + name: isSet(object.name) ? globalThis.String(object.name) : '', + requirements: isSet(object.requirements) + ? PlacementRequirements.fromJSON(object.requirements) + : undefined, + resources: globalThis.Array.isArray(object?.resources) + ? object.resources.map((e: any) => ResourceUnit.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GroupSpec): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.requirements !== undefined) { + obj.requirements = PlacementRequirements.toJSON(message.requirements); + } + if (message.resources?.length) { + obj.resources = message.resources.map((e) => ResourceUnit.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GroupSpec { + return GroupSpec.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GroupSpec { + const message = createBaseGroupSpec(); + message.name = object.name ?? ''; + message.requirements = + object.requirements !== undefined && object.requirements !== null + ? 
PlacementRequirements.fromPartial(object.requirements) + : undefined; + message.resources = + object.resources?.map((e) => ResourceUnit.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GroupSpec.$type, GroupSpec); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/params.ts b/ts/src/generated/akash/deployment/v1beta4/params.ts new file mode 100644 index 00000000..c42763d3 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/params.ts @@ -0,0 +1,108 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { Coin } from '../../../cosmos/base/v1beta1/coin'; +import { messageTypeRegistry } from '../../../typeRegistry'; + +/** Params defines the parameters for the x/deployment package */ +export interface Params { + $type: 'akash.deployment.v1beta4.Params'; + minDeposits: Coin[]; +} + +function createBaseParams(): Params { + return { $type: 'akash.deployment.v1beta4.Params', minDeposits: [] }; +} + +export const Params = { + $type: 'akash.deployment.v1beta4.Params' as const, + + encode( + message: Params, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.minDeposits) { + Coin.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Params { + const reader = + input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.minDeposits.push(Coin.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Params { + return { + $type: Params.$type, + minDeposits: globalThis.Array.isArray(object?.minDeposits) + ? object.minDeposits.map((e: any) => Coin.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Params): unknown { + const obj: any = {}; + if (message.minDeposits?.length) { + obj.minDeposits = message.minDeposits.map((e) => Coin.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Params { + return Params.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Params { + const message = createBaseParams(); + message.minDeposits = + object.minDeposits?.map((e) => Coin.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Params.$type, Params); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/ts/src/generated/akash/deployment/v1beta4/query.ts b/ts/src/generated/akash/deployment/v1beta4/query.ts new file mode 100644 index 00000000..d62c32c3 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/query.ts @@ -0,0 +1,706 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { + PageRequest, + PageResponse, +} from '../../../cosmos/base/query/v1beta1/pagination'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Account } from '../../escrow/v1beta3/types'; +import { Deployment, DeploymentFilters, DeploymentID } from './deployment'; +import { Group } from './group'; +import { GroupID } from './groupid'; + +/** QueryDeploymentsRequest is request type for the Query/Deployments RPC method */ +export interface QueryDeploymentsRequest { + $type: 'akash.deployment.v1beta4.QueryDeploymentsRequest'; + filters: DeploymentFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryDeploymentsResponse is response type for the Query/Deployments RPC method */ +export interface QueryDeploymentsResponse { + $type: 'akash.deployment.v1beta4.QueryDeploymentsResponse'; + deployments: QueryDeploymentResponse[]; + pagination: PageResponse | undefined; +} + +/** QueryDeploymentRequest is request type for the Query/Deployment RPC method */ +export interface QueryDeploymentRequest { + $type: 'akash.deployment.v1beta4.QueryDeploymentRequest'; + id: DeploymentID | undefined; +} + +/** QueryDeploymentResponse is response type for the Query/Deployment RPC method */ +export interface QueryDeploymentResponse { + $type: 'akash.deployment.v1beta4.QueryDeploymentResponse'; + deployment: Deployment | undefined; + groups: Group[]; + escrowAccount: Account | undefined; +} + +/** QueryGroupRequest is request type for the Query/Group RPC method */ +export interface 
QueryGroupRequest { + $type: 'akash.deployment.v1beta4.QueryGroupRequest'; + id: GroupID | undefined; +} + +/** QueryGroupResponse is response type for the Query/Group RPC method */ +export interface QueryGroupResponse { + $type: 'akash.deployment.v1beta4.QueryGroupResponse'; + group: Group | undefined; +} + +function createBaseQueryDeploymentsRequest(): QueryDeploymentsRequest { + return { + $type: 'akash.deployment.v1beta4.QueryDeploymentsRequest', + filters: undefined, + pagination: undefined, + }; +} + +export const QueryDeploymentsRequest = { + $type: 'akash.deployment.v1beta4.QueryDeploymentsRequest' as const, + + encode( + message: QueryDeploymentsRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.filters !== undefined) { + DeploymentFilters.encode( + message.filters, + writer.uint32(10).fork(), + ).ldelim(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryDeploymentsRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryDeploymentsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = DeploymentFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentsRequest { + return { + $type: QueryDeploymentsRequest.$type, + filters: isSet(object.filters) + ? 
DeploymentFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryDeploymentsRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = DeploymentFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryDeploymentsRequest { + return QueryDeploymentsRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentsRequest { + const message = createBaseQueryDeploymentsRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? DeploymentFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryDeploymentsRequest.$type, QueryDeploymentsRequest); + +function createBaseQueryDeploymentsResponse(): QueryDeploymentsResponse { + return { + $type: 'akash.deployment.v1beta4.QueryDeploymentsResponse', + deployments: [], + pagination: undefined, + }; +} + +export const QueryDeploymentsResponse = { + $type: 'akash.deployment.v1beta4.QueryDeploymentsResponse' as const, + + encode( + message: QueryDeploymentsResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.deployments) { + QueryDeploymentResponse.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork(), + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryDeploymentsResponse { + const reader = + input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryDeploymentsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployments.push( + QueryDeploymentResponse.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentsResponse { + return { + $type: QueryDeploymentsResponse.$type, + deployments: globalThis.Array.isArray(object?.deployments) + ? object.deployments.map((e: any) => + QueryDeploymentResponse.fromJSON(e), + ) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryDeploymentsResponse): unknown { + const obj: any = {}; + if (message.deployments?.length) { + obj.deployments = message.deployments.map((e) => + QueryDeploymentResponse.toJSON(e), + ); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create( + base?: DeepPartial, + ): QueryDeploymentsResponse { + return QueryDeploymentsResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentsResponse { + const message = createBaseQueryDeploymentsResponse(); + message.deployments = + object.deployments?.map((e) => QueryDeploymentResponse.fromPartial(e)) || + []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set( + QueryDeploymentsResponse.$type, + QueryDeploymentsResponse, +); + +function createBaseQueryDeploymentRequest(): QueryDeploymentRequest { + return { + $type: 'akash.deployment.v1beta4.QueryDeploymentRequest', + id: undefined, + }; +} + +export const QueryDeploymentRequest = { + $type: 'akash.deployment.v1beta4.QueryDeploymentRequest' as const, + + encode( + message: QueryDeploymentRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + DeploymentID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryDeploymentRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryDeploymentRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = DeploymentID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentRequest { + return { + $type: QueryDeploymentRequest.$type, + id: isSet(object.id) ? DeploymentID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryDeploymentRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = DeploymentID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryDeploymentRequest { + return QueryDeploymentRequest.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentRequest { + const message = createBaseQueryDeploymentRequest(); + message.id = + object.id !== undefined && object.id !== null + ? 
DeploymentID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryDeploymentRequest.$type, QueryDeploymentRequest); + +function createBaseQueryDeploymentResponse(): QueryDeploymentResponse { + return { + $type: 'akash.deployment.v1beta4.QueryDeploymentResponse', + deployment: undefined, + groups: [], + escrowAccount: undefined, + }; +} + +export const QueryDeploymentResponse = { + $type: 'akash.deployment.v1beta4.QueryDeploymentResponse' as const, + + encode( + message: QueryDeploymentResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.deployment !== undefined) { + Deployment.encode(message.deployment, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.groups) { + Group.encode(v!, writer.uint32(18).fork()).ldelim(); + } + if (message.escrowAccount !== undefined) { + Account.encode(message.escrowAccount, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryDeploymentResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryDeploymentResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.deployment = Deployment.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.groups.push(Group.decode(reader, reader.uint32())); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.escrowAccount = Account.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryDeploymentResponse { + return { + $type: QueryDeploymentResponse.$type, + deployment: isSet(object.deployment) + ? 
Deployment.fromJSON(object.deployment) + : undefined, + groups: globalThis.Array.isArray(object?.groups) + ? object.groups.map((e: any) => Group.fromJSON(e)) + : [], + escrowAccount: isSet(object.escrowAccount) + ? Account.fromJSON(object.escrowAccount) + : undefined, + }; + }, + + toJSON(message: QueryDeploymentResponse): unknown { + const obj: any = {}; + if (message.deployment !== undefined) { + obj.deployment = Deployment.toJSON(message.deployment); + } + if (message.groups?.length) { + obj.groups = message.groups.map((e) => Group.toJSON(e)); + } + if (message.escrowAccount !== undefined) { + obj.escrowAccount = Account.toJSON(message.escrowAccount); + } + return obj; + }, + + create(base?: DeepPartial): QueryDeploymentResponse { + return QueryDeploymentResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryDeploymentResponse { + const message = createBaseQueryDeploymentResponse(); + message.deployment = + object.deployment !== undefined && object.deployment !== null + ? Deployment.fromPartial(object.deployment) + : undefined; + message.groups = object.groups?.map((e) => Group.fromPartial(e)) || []; + message.escrowAccount = + object.escrowAccount !== undefined && object.escrowAccount !== null + ? 
Account.fromPartial(object.escrowAccount) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryDeploymentResponse.$type, QueryDeploymentResponse); + +function createBaseQueryGroupRequest(): QueryGroupRequest { + return { $type: 'akash.deployment.v1beta4.QueryGroupRequest', id: undefined }; +} + +export const QueryGroupRequest = { + $type: 'akash.deployment.v1beta4.QueryGroupRequest' as const, + + encode( + message: QueryGroupRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + GroupID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryGroupRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = GroupID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryGroupRequest { + return { + $type: QueryGroupRequest.$type, + id: isSet(object.id) ? GroupID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryGroupRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = GroupID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryGroupRequest { + return QueryGroupRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryGroupRequest { + const message = createBaseQueryGroupRequest(); + message.id = + object.id !== undefined && object.id !== null + ? 
GroupID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryGroupRequest.$type, QueryGroupRequest); + +function createBaseQueryGroupResponse(): QueryGroupResponse { + return { + $type: 'akash.deployment.v1beta4.QueryGroupResponse', + group: undefined, + }; +} + +export const QueryGroupResponse = { + $type: 'akash.deployment.v1beta4.QueryGroupResponse' as const, + + encode( + message: QueryGroupResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.group !== undefined) { + Group.encode(message.group, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryGroupResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryGroupResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.group = Group.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryGroupResponse { + return { + $type: QueryGroupResponse.$type, + group: isSet(object.group) ? Group.fromJSON(object.group) : undefined, + }; + }, + + toJSON(message: QueryGroupResponse): unknown { + const obj: any = {}; + if (message.group !== undefined) { + obj.group = Group.toJSON(message.group); + } + return obj; + }, + + create(base?: DeepPartial): QueryGroupResponse { + return QueryGroupResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryGroupResponse { + const message = createBaseQueryGroupResponse(); + message.group = + object.group !== undefined && object.group !== null + ? 
Group.fromPartial(object.group) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryGroupResponse.$type, QueryGroupResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Deployments queries deployments */ + Deployments( + request: QueryDeploymentsRequest, + ): Promise; + /** Deployment queries deployment details */ + Deployment(request: QueryDeploymentRequest): Promise; + /** Group queries group details */ + Group(request: QueryGroupRequest): Promise; +} + +export const QueryServiceName = 'akash.deployment.v1beta4.Query'; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Deployments = this.Deployments.bind(this); + this.Deployment = this.Deployment.bind(this); + this.Group = this.Group.bind(this); + } + Deployments( + request: QueryDeploymentsRequest, + ): Promise { + const data = QueryDeploymentsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Deployments', data); + return promise.then((data) => + QueryDeploymentsResponse.decode(_m0.Reader.create(data)), + ); + } + + Deployment( + request: QueryDeploymentRequest, + ): Promise { + const data = QueryDeploymentRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Deployment', data); + return promise.then((data) => + QueryDeploymentResponse.decode(_m0.Reader.create(data)), + ); + } + + Group(request: QueryGroupRequest): Promise { + const data = QueryGroupRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Group', data); + return promise.then((data) => + QueryGroupResponse.decode(_m0.Reader.create(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array 
+ | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/resourceunit.ts b/ts/src/generated/akash/deployment/v1beta4/resourceunit.ts new file mode 100644 index 00000000..a05f351e --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/resourceunit.ts @@ -0,0 +1,155 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { DecCoin } from '../../../cosmos/base/v1beta1/coin'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Resources } from '../../base/resources/v1/resources'; + +/** ResourceUnit extends Resources and adds Count along with the Price */ +export interface ResourceUnit { + $type: 'akash.deployment.v1beta4.ResourceUnit'; + resource: Resources | undefined; + count: number; + price: DecCoin | undefined; +} + +function createBaseResourceUnit(): ResourceUnit { + return { + $type: 'akash.deployment.v1beta4.ResourceUnit', + resource: undefined, + count: 0, + price: undefined, + }; +} + +export const ResourceUnit = { + $type: 'akash.deployment.v1beta4.ResourceUnit' as const, + + encode( + message: ResourceUnit, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.resource !== undefined) { + Resources.encode(message.resource, writer.uint32(10).fork()).ldelim(); + } + if (message.count !== 0) { + writer.uint32(16).uint32(message.count); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader 
| Uint8Array, length?: number): ResourceUnit { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseResourceUnit(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.resource = Resources.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.count = reader.uint32(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ResourceUnit { + return { + $type: ResourceUnit.$type, + resource: isSet(object.resource) + ? Resources.fromJSON(object.resource) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + }; + }, + + toJSON(message: ResourceUnit): unknown { + const obj: any = {}; + if (message.resource !== undefined) { + obj.resource = Resources.toJSON(message.resource); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + return obj; + }, + + create(base?: DeepPartial): ResourceUnit { + return ResourceUnit.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ResourceUnit { + const message = createBaseResourceUnit(); + message.resource = + object.resource !== undefined && object.resource !== null + ? Resources.fromPartial(object.resource) + : undefined; + message.count = object.count ?? 0; + message.price = + object.price !== undefined && object.price !== null + ? 
DecCoin.fromPartial(object.price) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ResourceUnit.$type, ResourceUnit); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/deployment/v1beta4/service.grpc-js.ts b/ts/src/generated/akash/deployment/v1beta4/service.grpc-js.ts new file mode 100644 index 00000000..12284a96 --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/service.grpc-js.ts @@ -0,0 +1,348 @@ +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from '@grpc/grpc-js'; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from '@grpc/grpc-js'; +import { + MsgCloseDeployment, + MsgCloseDeploymentResponse, + MsgCreateDeployment, + MsgCreateDeploymentResponse, + MsgDepositDeployment, + MsgDepositDeploymentResponse, + MsgUpdateDeployment, + MsgUpdateDeploymentResponse, +} from './deploymentmsg'; +import { + MsgCloseGroup, + MsgCloseGroupResponse, + MsgPauseGroup, + MsgPauseGroupResponse, + MsgStartGroup, + MsgStartGroupResponse, +} from './groupmsg'; + +export const protobufPackage = 'akash.deployment.v1beta4'; + +/** Msg defines the deployment Msg service. */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** CreateDeployment defines a method to create new deployment given proper inputs. 
*/ + createDeployment: { + path: '/akash.deployment.v1beta4.Msg/CreateDeployment', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateDeployment) => + Buffer.from(MsgCreateDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateDeployment.decode(value), + responseSerialize: (value: MsgCreateDeploymentResponse) => + Buffer.from(MsgCreateDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCreateDeploymentResponse.decode(value), + }, + /** DepositDeployment deposits more funds into the deployment account */ + depositDeployment: { + path: '/akash.deployment.v1beta4.Msg/DepositDeployment', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgDepositDeployment) => + Buffer.from(MsgDepositDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgDepositDeployment.decode(value), + responseSerialize: (value: MsgDepositDeploymentResponse) => + Buffer.from(MsgDepositDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgDepositDeploymentResponse.decode(value), + }, + /** UpdateDeployment defines a method to update a deployment given proper inputs. */ + updateDeployment: { + path: '/akash.deployment.v1beta4.Msg/UpdateDeployment', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgUpdateDeployment) => + Buffer.from(MsgUpdateDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgUpdateDeployment.decode(value), + responseSerialize: (value: MsgUpdateDeploymentResponse) => + Buffer.from(MsgUpdateDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgUpdateDeploymentResponse.decode(value), + }, + /** CloseDeployment defines a method to close a deployment given proper inputs. 
*/ + closeDeployment: { + path: '/akash.deployment.v1beta4.Msg/CloseDeployment', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseDeployment) => + Buffer.from(MsgCloseDeployment.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseDeployment.decode(value), + responseSerialize: (value: MsgCloseDeploymentResponse) => + Buffer.from(MsgCloseDeploymentResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCloseDeploymentResponse.decode(value), + }, + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + closeGroup: { + path: '/akash.deployment.v1beta4.Msg/CloseGroup', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseGroup) => + Buffer.from(MsgCloseGroup.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseGroup.decode(value), + responseSerialize: (value: MsgCloseGroupResponse) => + Buffer.from(MsgCloseGroupResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCloseGroupResponse.decode(value), + }, + /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ + pauseGroup: { + path: '/akash.deployment.v1beta4.Msg/PauseGroup', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgPauseGroup) => + Buffer.from(MsgPauseGroup.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgPauseGroup.decode(value), + responseSerialize: (value: MsgPauseGroupResponse) => + Buffer.from(MsgPauseGroupResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgPauseGroupResponse.decode(value), + }, + /** StartGroup defines a method to close a group of a deployment given proper inputs. 
*/ + startGroup: { + path: '/akash.deployment.v1beta4.Msg/StartGroup', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgStartGroup) => + Buffer.from(MsgStartGroup.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgStartGroup.decode(value), + responseSerialize: (value: MsgStartGroupResponse) => + Buffer.from(MsgStartGroupResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgStartGroupResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** CreateDeployment defines a method to create new deployment given proper inputs. */ + createDeployment: handleUnaryCall< + MsgCreateDeployment, + MsgCreateDeploymentResponse + >; + /** DepositDeployment deposits more funds into the deployment account */ + depositDeployment: handleUnaryCall< + MsgDepositDeployment, + MsgDepositDeploymentResponse + >; + /** UpdateDeployment defines a method to update a deployment given proper inputs. */ + updateDeployment: handleUnaryCall< + MsgUpdateDeployment, + MsgUpdateDeploymentResponse + >; + /** CloseDeployment defines a method to close a deployment given proper inputs. */ + closeDeployment: handleUnaryCall< + MsgCloseDeployment, + MsgCloseDeploymentResponse + >; + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + closeGroup: handleUnaryCall; + /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ + pauseGroup: handleUnaryCall; + /** StartGroup defines a method to close a group of a deployment given proper inputs. */ + startGroup: handleUnaryCall; +} + +export interface MsgClient extends Client { + /** CreateDeployment defines a method to create new deployment given proper inputs. 
*/ + createDeployment( + request: MsgCreateDeployment, + callback: ( + error: ServiceError | null, + response: MsgCreateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + createDeployment( + request: MsgCreateDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + createDeployment( + request: MsgCreateDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** DepositDeployment deposits more funds into the deployment account */ + depositDeployment( + request: MsgDepositDeployment, + callback: ( + error: ServiceError | null, + response: MsgDepositDeploymentResponse, + ) => void, + ): ClientUnaryCall; + depositDeployment( + request: MsgDepositDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgDepositDeploymentResponse, + ) => void, + ): ClientUnaryCall; + depositDeployment( + request: MsgDepositDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgDepositDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** UpdateDeployment defines a method to update a deployment given proper inputs. */ + updateDeployment( + request: MsgUpdateDeployment, + callback: ( + error: ServiceError | null, + response: MsgUpdateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + updateDeployment( + request: MsgUpdateDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgUpdateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + updateDeployment( + request: MsgUpdateDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgUpdateDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** CloseDeployment defines a method to close a deployment given proper inputs. 
*/ + closeDeployment( + request: MsgCloseDeployment, + callback: ( + error: ServiceError | null, + response: MsgCloseDeploymentResponse, + ) => void, + ): ClientUnaryCall; + closeDeployment( + request: MsgCloseDeployment, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseDeploymentResponse, + ) => void, + ): ClientUnaryCall; + closeDeployment( + request: MsgCloseDeployment, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseDeploymentResponse, + ) => void, + ): ClientUnaryCall; + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + closeGroup( + request: MsgCloseGroup, + callback: ( + error: ServiceError | null, + response: MsgCloseGroupResponse, + ) => void, + ): ClientUnaryCall; + closeGroup( + request: MsgCloseGroup, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseGroupResponse, + ) => void, + ): ClientUnaryCall; + closeGroup( + request: MsgCloseGroup, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseGroupResponse, + ) => void, + ): ClientUnaryCall; + /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ + pauseGroup( + request: MsgPauseGroup, + callback: ( + error: ServiceError | null, + response: MsgPauseGroupResponse, + ) => void, + ): ClientUnaryCall; + pauseGroup( + request: MsgPauseGroup, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgPauseGroupResponse, + ) => void, + ): ClientUnaryCall; + pauseGroup( + request: MsgPauseGroup, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgPauseGroupResponse, + ) => void, + ): ClientUnaryCall; + /** StartGroup defines a method to close a group of a deployment given proper inputs. 
*/ + startGroup( + request: MsgStartGroup, + callback: ( + error: ServiceError | null, + response: MsgStartGroupResponse, + ) => void, + ): ClientUnaryCall; + startGroup( + request: MsgStartGroup, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgStartGroupResponse, + ) => void, + ): ClientUnaryCall; + startGroup( + request: MsgStartGroup, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgStartGroupResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + 'akash.deployment.v1beta4.Msg', +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/deployment/v1beta4/service.ts b/ts/src/generated/akash/deployment/v1beta4/service.ts new file mode 100644 index 00000000..3668124a --- /dev/null +++ b/ts/src/generated/akash/deployment/v1beta4/service.ts @@ -0,0 +1,134 @@ +/* eslint-disable */ +import _m0 from 'protobufjs/minimal'; +import { + MsgCloseDeployment, + MsgCloseDeploymentResponse, + MsgCreateDeployment, + MsgCreateDeploymentResponse, + MsgDepositDeployment, + MsgDepositDeploymentResponse, + MsgUpdateDeployment, + MsgUpdateDeploymentResponse, +} from './deploymentmsg'; +import { + MsgCloseGroup, + MsgCloseGroupResponse, + MsgPauseGroup, + MsgPauseGroupResponse, + MsgStartGroup, + MsgStartGroupResponse, +} from './groupmsg'; + +/** Msg defines the deployment Msg service. */ +export interface Msg { + /** CreateDeployment defines a method to create new deployment given proper inputs. */ + CreateDeployment( + request: MsgCreateDeployment, + ): Promise; + /** DepositDeployment deposits more funds into the deployment account */ + DepositDeployment( + request: MsgDepositDeployment, + ): Promise; + /** UpdateDeployment defines a method to update a deployment given proper inputs. 
*/ + UpdateDeployment( + request: MsgUpdateDeployment, + ): Promise; + /** CloseDeployment defines a method to close a deployment given proper inputs. */ + CloseDeployment( + request: MsgCloseDeployment, + ): Promise; + /** CloseGroup defines a method to close a group of a deployment given proper inputs. */ + CloseGroup(request: MsgCloseGroup): Promise; + /** PauseGroup defines a method to close a group of a deployment given proper inputs. */ + PauseGroup(request: MsgPauseGroup): Promise; + /** StartGroup defines a method to close a group of a deployment given proper inputs. */ + StartGroup(request: MsgStartGroup): Promise; +} + +export const MsgServiceName = 'akash.deployment.v1beta4.Msg'; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.CreateDeployment = this.CreateDeployment.bind(this); + this.DepositDeployment = this.DepositDeployment.bind(this); + this.UpdateDeployment = this.UpdateDeployment.bind(this); + this.CloseDeployment = this.CloseDeployment.bind(this); + this.CloseGroup = this.CloseGroup.bind(this); + this.PauseGroup = this.PauseGroup.bind(this); + this.StartGroup = this.StartGroup.bind(this); + } + CreateDeployment( + request: MsgCreateDeployment, + ): Promise { + const data = MsgCreateDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CreateDeployment', data); + return promise.then((data) => + MsgCreateDeploymentResponse.decode(_m0.Reader.create(data)), + ); + } + + DepositDeployment( + request: MsgDepositDeployment, + ): Promise { + const data = MsgDepositDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, 'DepositDeployment', data); + return promise.then((data) => + MsgDepositDeploymentResponse.decode(_m0.Reader.create(data)), + ); + } + + UpdateDeployment( + request: MsgUpdateDeployment, + 
): Promise { + const data = MsgUpdateDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, 'UpdateDeployment', data); + return promise.then((data) => + MsgUpdateDeploymentResponse.decode(_m0.Reader.create(data)), + ); + } + + CloseDeployment( + request: MsgCloseDeployment, + ): Promise { + const data = MsgCloseDeployment.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CloseDeployment', data); + return promise.then((data) => + MsgCloseDeploymentResponse.decode(_m0.Reader.create(data)), + ); + } + + CloseGroup(request: MsgCloseGroup): Promise { + const data = MsgCloseGroup.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CloseGroup', data); + return promise.then((data) => + MsgCloseGroupResponse.decode(_m0.Reader.create(data)), + ); + } + + PauseGroup(request: MsgPauseGroup): Promise { + const data = MsgPauseGroup.encode(request).finish(); + const promise = this.rpc.request(this.service, 'PauseGroup', data); + return promise.then((data) => + MsgPauseGroupResponse.decode(_m0.Reader.create(data)), + ); + } + + StartGroup(request: MsgStartGroup): Promise { + const data = MsgStartGroup.encode(request).finish(); + const promise = this.rpc.request(this.service, 'StartGroup', data); + return promise.then((data) => + MsgStartGroupResponse.decode(_m0.Reader.create(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/group.ts b/ts/src/generated/akash/manifest/v2beta3/group.ts new file mode 100644 index 00000000..8d91769c --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/group.ts @@ -0,0 +1,125 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Service } from './service'; + +/** Group store name and list of services */ +export interface Group { + 
$type: 'akash.manifest.v2beta3.Group'; + name: string; + services: Service[]; +} + +function createBaseGroup(): Group { + return { $type: 'akash.manifest.v2beta3.Group', name: '', services: [] }; +} + +export const Group = { + $type: 'akash.manifest.v2beta3.Group' as const, + + encode(message: Group, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + for (const v of message.services) { + Service.encode(v!, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Group { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGroup(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.services.push(Service.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Group { + return { + $type: Group.$type, + name: isSet(object.name) ? globalThis.String(object.name) : '', + services: globalThis.Array.isArray(object?.services) + ? object.services.map((e: any) => Service.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Group): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.services?.length) { + obj.services = message.services.map((e) => Service.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): Group { + return Group.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Group { + const message = createBaseGroup(); + message.name = object.name ?? 
''; + message.services = + object.services?.map((e) => Service.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Group.$type, Group); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/httpoptions.ts b/ts/src/generated/akash/manifest/v2beta3/httpoptions.ts new file mode 100644 index 00000000..aa668eec --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/httpoptions.ts @@ -0,0 +1,218 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; + +/** ServiceExposeHTTPOptions */ +export interface ServiceExposeHTTPOptions { + $type: 'akash.manifest.v2beta3.ServiceExposeHTTPOptions'; + maxBodySize: number; + readTimeout: number; + sendTimeout: number; + nextTries: number; + nextTimeout: number; + nextCases: string[]; +} + +function createBaseServiceExposeHTTPOptions(): ServiceExposeHTTPOptions { + return { + $type: 'akash.manifest.v2beta3.ServiceExposeHTTPOptions', + maxBodySize: 0, + readTimeout: 0, + sendTimeout: 0, + nextTries: 0, + nextTimeout: 0, + nextCases: [], + }; +} + +export const ServiceExposeHTTPOptions = { + $type: 'akash.manifest.v2beta3.ServiceExposeHTTPOptions' as const, + + encode( + message: ServiceExposeHTTPOptions, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.maxBodySize !== 0) { + writer.uint32(8).uint32(message.maxBodySize); + } + if (message.readTimeout !== 0) 
{ + writer.uint32(16).uint32(message.readTimeout); + } + if (message.sendTimeout !== 0) { + writer.uint32(24).uint32(message.sendTimeout); + } + if (message.nextTries !== 0) { + writer.uint32(32).uint32(message.nextTries); + } + if (message.nextTimeout !== 0) { + writer.uint32(40).uint32(message.nextTimeout); + } + for (const v of message.nextCases) { + writer.uint32(50).string(v!); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): ServiceExposeHTTPOptions { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseServiceExposeHTTPOptions(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.maxBodySize = reader.uint32(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.readTimeout = reader.uint32(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.sendTimeout = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.nextTries = reader.uint32(); + continue; + case 5: + if (tag !== 40) { + break; + } + + message.nextTimeout = reader.uint32(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.nextCases.push(reader.string()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceExposeHTTPOptions { + return { + $type: ServiceExposeHTTPOptions.$type, + maxBodySize: isSet(object.maxBodySize) + ? globalThis.Number(object.maxBodySize) + : 0, + readTimeout: isSet(object.readTimeout) + ? globalThis.Number(object.readTimeout) + : 0, + sendTimeout: isSet(object.sendTimeout) + ? globalThis.Number(object.sendTimeout) + : 0, + nextTries: isSet(object.nextTries) + ? 
globalThis.Number(object.nextTries) + : 0, + nextTimeout: isSet(object.nextTimeout) + ? globalThis.Number(object.nextTimeout) + : 0, + nextCases: globalThis.Array.isArray(object?.nextCases) + ? object.nextCases.map((e: any) => globalThis.String(e)) + : [], + }; + }, + + toJSON(message: ServiceExposeHTTPOptions): unknown { + const obj: any = {}; + if (message.maxBodySize !== 0) { + obj.maxBodySize = Math.round(message.maxBodySize); + } + if (message.readTimeout !== 0) { + obj.readTimeout = Math.round(message.readTimeout); + } + if (message.sendTimeout !== 0) { + obj.sendTimeout = Math.round(message.sendTimeout); + } + if (message.nextTries !== 0) { + obj.nextTries = Math.round(message.nextTries); + } + if (message.nextTimeout !== 0) { + obj.nextTimeout = Math.round(message.nextTimeout); + } + if (message.nextCases?.length) { + obj.nextCases = message.nextCases; + } + return obj; + }, + + create( + base?: DeepPartial, + ): ServiceExposeHTTPOptions { + return ServiceExposeHTTPOptions.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): ServiceExposeHTTPOptions { + const message = createBaseServiceExposeHTTPOptions(); + message.maxBodySize = object.maxBodySize ?? 0; + message.readTimeout = object.readTimeout ?? 0; + message.sendTimeout = object.sendTimeout ?? 0; + message.nextTries = object.nextTries ?? 0; + message.nextTimeout = object.nextTimeout ?? 0; + message.nextCases = object.nextCases?.map((e) => e) || []; + return message; + }, +}; + +messageTypeRegistry.set( + ServiceExposeHTTPOptions.$type, + ServiceExposeHTTPOptions, +); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/service.grpc-js.ts b/ts/src/generated/akash/manifest/v2beta3/service.grpc-js.ts new file mode 100644 index 00000000..bd54b7cb --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/service.grpc-js.ts @@ -0,0 +1,623 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Resources } from '../../base/resources/v1/resources'; +import { ServiceExpose } from './serviceexpose'; + +export const protobufPackage = 'akash.manifest.v2beta3'; + +/** StorageParams */ +export interface StorageParams { + $type: 'akash.manifest.v2beta3.StorageParams'; + name: string; + mount: string; + readOnly: boolean; +} + +/** ServiceParams */ +export interface ServiceParams { + $type: 'akash.manifest.v2beta3.ServiceParams'; + storage: StorageParams[]; + credentials: ImageCredentials | undefined; +} + +/** Credentials to fetch image from registry */ +export interface ImageCredentials { + $type: 'akash.manifest.v2beta3.ImageCredentials'; + host: string; + email: string; + username: string; + password: string; +} + +/** Service stores name, image, args, env, unit, count and expose list of service */ +export interface Service { + $type: 'akash.manifest.v2beta3.Service'; + name: string; + image: string; + command: string[]; + args: string[]; + env: string[]; + resources: Resources | undefined; + count: number; + expose: ServiceExpose[]; + params: ServiceParams | undefined; +} + +function createBaseStorageParams(): StorageParams { + return { + $type: 'akash.manifest.v2beta3.StorageParams', + name: '', + mount: '', + readOnly: false, + }; +} + +export const StorageParams = { + $type: 
'akash.manifest.v2beta3.StorageParams' as const, + + encode( + message: StorageParams, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + if (message.mount !== '') { + writer.uint32(18).string(message.mount); + } + if (message.readOnly !== false) { + writer.uint32(24).bool(message.readOnly); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StorageParams { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseStorageParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.mount = reader.string(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.readOnly = reader.bool(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): StorageParams { + return { + $type: StorageParams.$type, + name: isSet(object.name) ? globalThis.String(object.name) : '', + mount: isSet(object.mount) ? globalThis.String(object.mount) : '', + readOnly: isSet(object.readOnly) + ? globalThis.Boolean(object.readOnly) + : false, + }; + }, + + toJSON(message: StorageParams): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.mount !== '') { + obj.mount = message.mount; + } + if (message.readOnly !== false) { + obj.readOnly = message.readOnly; + } + return obj; + }, + + create(base?: DeepPartial): StorageParams { + return StorageParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): StorageParams { + const message = createBaseStorageParams(); + message.name = object.name ?? 
''; + message.mount = object.mount ?? ''; + message.readOnly = object.readOnly ?? false; + return message; + }, +}; + +messageTypeRegistry.set(StorageParams.$type, StorageParams); + +function createBaseServiceParams(): ServiceParams { + return { + $type: 'akash.manifest.v2beta3.ServiceParams', + storage: [], + credentials: undefined, + }; +} + +export const ServiceParams = { + $type: 'akash.manifest.v2beta3.ServiceParams' as const, + + encode( + message: ServiceParams, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.storage) { + StorageParams.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.credentials !== undefined) { + ImageCredentials.encode( + message.credentials, + writer.uint32(82).fork(), + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ServiceParams { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseServiceParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.storage.push(StorageParams.decode(reader, reader.uint32())); + continue; + case 10: + if (tag !== 82) { + break; + } + + message.credentials = ImageCredentials.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceParams { + return { + $type: ServiceParams.$type, + storage: globalThis.Array.isArray(object?.storage) + ? object.storage.map((e: any) => StorageParams.fromJSON(e)) + : [], + credentials: isSet(object.credentials) + ? 
ImageCredentials.fromJSON(object.credentials) + : undefined, + }; + }, + + toJSON(message: ServiceParams): unknown { + const obj: any = {}; + if (message.storage?.length) { + obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); + } + if (message.credentials !== undefined) { + obj.credentials = ImageCredentials.toJSON(message.credentials); + } + return obj; + }, + + create(base?: DeepPartial): ServiceParams { + return ServiceParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ServiceParams { + const message = createBaseServiceParams(); + message.storage = + object.storage?.map((e) => StorageParams.fromPartial(e)) || []; + message.credentials = + object.credentials !== undefined && object.credentials !== null + ? ImageCredentials.fromPartial(object.credentials) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ServiceParams.$type, ServiceParams); + +function createBaseImageCredentials(): ImageCredentials { + return { + $type: 'akash.manifest.v2beta3.ImageCredentials', + host: '', + email: '', + username: '', + password: '', + }; +} + +export const ImageCredentials = { + $type: 'akash.manifest.v2beta3.ImageCredentials' as const, + + encode( + message: ImageCredentials, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.host !== '') { + writer.uint32(10).string(message.host); + } + if (message.email !== '') { + writer.uint32(18).string(message.email); + } + if (message.username !== '') { + writer.uint32(26).string(message.username); + } + if (message.password !== '') { + writer.uint32(34).string(message.password); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ImageCredentials { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseImageCredentials(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.host = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.email = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.username = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.password = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ImageCredentials { + return { + $type: ImageCredentials.$type, + host: isSet(object.host) ? globalThis.String(object.host) : '', + email: isSet(object.email) ? globalThis.String(object.email) : '', + username: isSet(object.username) + ? globalThis.String(object.username) + : '', + password: isSet(object.password) + ? globalThis.String(object.password) + : '', + }; + }, + + toJSON(message: ImageCredentials): unknown { + const obj: any = {}; + if (message.host !== '') { + obj.host = message.host; + } + if (message.email !== '') { + obj.email = message.email; + } + if (message.username !== '') { + obj.username = message.username; + } + if (message.password !== '') { + obj.password = message.password; + } + return obj; + }, + + create(base?: DeepPartial): ImageCredentials { + return ImageCredentials.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ImageCredentials { + const message = createBaseImageCredentials(); + message.host = object.host ?? ''; + message.email = object.email ?? ''; + message.username = object.username ?? ''; + message.password = object.password ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set(ImageCredentials.$type, ImageCredentials); + +function createBaseService(): Service { + return { + $type: 'akash.manifest.v2beta3.Service', + name: '', + image: '', + command: [], + args: [], + env: [], + resources: undefined, + count: 0, + expose: [], + params: undefined, + }; +} + +export const Service = { + $type: 'akash.manifest.v2beta3.Service' as const, + + encode( + message: Service, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + if (message.image !== '') { + writer.uint32(18).string(message.image); + } + for (const v of message.command) { + writer.uint32(26).string(v!); + } + for (const v of message.args) { + writer.uint32(34).string(v!); + } + for (const v of message.env) { + writer.uint32(42).string(v!); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(50).fork()).ldelim(); + } + if (message.count !== 0) { + writer.uint32(56).uint32(message.count); + } + for (const v of message.expose) { + ServiceExpose.encode(v!, writer.uint32(66).fork()).ldelim(); + } + if (message.params !== undefined) { + ServiceParams.encode(message.params, writer.uint32(74).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Service { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseService(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.image = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.command.push(reader.string()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.args.push(reader.string()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.env.push(reader.string()); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.resources = Resources.decode(reader, reader.uint32()); + continue; + case 7: + if (tag !== 56) { + break; + } + + message.count = reader.uint32(); + continue; + case 8: + if (tag !== 66) { + break; + } + + message.expose.push(ServiceExpose.decode(reader, reader.uint32())); + continue; + case 9: + if (tag !== 74) { + break; + } + + message.params = ServiceParams.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Service { + return { + $type: Service.$type, + name: isSet(object.name) ? globalThis.String(object.name) : '', + image: isSet(object.image) ? globalThis.String(object.image) : '', + command: globalThis.Array.isArray(object?.command) + ? object.command.map((e: any) => globalThis.String(e)) + : [], + args: globalThis.Array.isArray(object?.args) + ? object.args.map((e: any) => globalThis.String(e)) + : [], + env: globalThis.Array.isArray(object?.env) + ? object.env.map((e: any) => globalThis.String(e)) + : [], + resources: isSet(object.resources) + ? Resources.fromJSON(object.resources) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + expose: globalThis.Array.isArray(object?.expose) + ? 
object.expose.map((e: any) => ServiceExpose.fromJSON(e)) + : [], + params: isSet(object.params) + ? ServiceParams.fromJSON(object.params) + : undefined, + }; + }, + + toJSON(message: Service): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.image !== '') { + obj.image = message.image; + } + if (message.command?.length) { + obj.command = message.command; + } + if (message.args?.length) { + obj.args = message.args; + } + if (message.env?.length) { + obj.env = message.env; + } + if (message.resources !== undefined) { + obj.resources = Resources.toJSON(message.resources); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + if (message.expose?.length) { + obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); + } + if (message.params !== undefined) { + obj.params = ServiceParams.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): Service { + return Service.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Service { + const message = createBaseService(); + message.name = object.name ?? ''; + message.image = object.image ?? ''; + message.command = object.command?.map((e) => e) || []; + message.args = object.args?.map((e) => e) || []; + message.env = object.env?.map((e) => e) || []; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.count = object.count ?? 0; + message.expose = + object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; + message.params = + object.params !== undefined && object.params !== null + ? ServiceParams.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Service.$type, Service); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +export type DeepPartial = T extends Builtin + ? T + : T extends Long + ? 
string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/service.ts b/ts/src/generated/akash/manifest/v2beta3/service.ts new file mode 100644 index 00000000..b74902ae --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/service.ts @@ -0,0 +1,621 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Resources } from '../../base/resources/v1/resources'; +import { ServiceExpose } from './serviceexpose'; + +/** StorageParams */ +export interface StorageParams { + $type: 'akash.manifest.v2beta3.StorageParams'; + name: string; + mount: string; + readOnly: boolean; +} + +/** ServiceParams */ +export interface ServiceParams { + $type: 'akash.manifest.v2beta3.ServiceParams'; + storage: StorageParams[]; + credentials: ImageCredentials | undefined; +} + +/** Credentials to fetch image from registry */ +export interface ImageCredentials { + $type: 'akash.manifest.v2beta3.ImageCredentials'; + host: string; + email: string; + username: string; + password: string; +} + +/** Service stores name, image, args, env, unit, count and expose list of service */ +export interface Service { + $type: 'akash.manifest.v2beta3.Service'; + name: string; + image: string; + command: string[]; + args: string[]; + env: string[]; + resources: Resources | undefined; + count: number; + expose: ServiceExpose[]; + params: ServiceParams | undefined; +} + +function createBaseStorageParams(): StorageParams { + return { + $type: 'akash.manifest.v2beta3.StorageParams', + name: '', + mount: '', + readOnly: false, + }; +} + +export const 
StorageParams = { + $type: 'akash.manifest.v2beta3.StorageParams' as const, + + encode( + message: StorageParams, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + if (message.mount !== '') { + writer.uint32(18).string(message.mount); + } + if (message.readOnly !== false) { + writer.uint32(24).bool(message.readOnly); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): StorageParams { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseStorageParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.mount = reader.string(); + continue; + case 3: + if (tag !== 24) { + break; + } + + message.readOnly = reader.bool(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): StorageParams { + return { + $type: StorageParams.$type, + name: isSet(object.name) ? globalThis.String(object.name) : '', + mount: isSet(object.mount) ? globalThis.String(object.mount) : '', + readOnly: isSet(object.readOnly) + ? globalThis.Boolean(object.readOnly) + : false, + }; + }, + + toJSON(message: StorageParams): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.mount !== '') { + obj.mount = message.mount; + } + if (message.readOnly !== false) { + obj.readOnly = message.readOnly; + } + return obj; + }, + + create(base?: DeepPartial): StorageParams { + return StorageParams.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): StorageParams { + const message = createBaseStorageParams(); + message.name = object.name ?? ''; + message.mount = object.mount ?? ''; + message.readOnly = object.readOnly ?? false; + return message; + }, +}; + +messageTypeRegistry.set(StorageParams.$type, StorageParams); + +function createBaseServiceParams(): ServiceParams { + return { + $type: 'akash.manifest.v2beta3.ServiceParams', + storage: [], + credentials: undefined, + }; +} + +export const ServiceParams = { + $type: 'akash.manifest.v2beta3.ServiceParams' as const, + + encode( + message: ServiceParams, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.storage) { + StorageParams.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.credentials !== undefined) { + ImageCredentials.encode( + message.credentials, + writer.uint32(82).fork(), + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ServiceParams { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseServiceParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.storage.push(StorageParams.decode(reader, reader.uint32())); + continue; + case 10: + if (tag !== 82) { + break; + } + + message.credentials = ImageCredentials.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceParams { + return { + $type: ServiceParams.$type, + storage: globalThis.Array.isArray(object?.storage) + ? object.storage.map((e: any) => StorageParams.fromJSON(e)) + : [], + credentials: isSet(object.credentials) + ? 
ImageCredentials.fromJSON(object.credentials) + : undefined, + }; + }, + + toJSON(message: ServiceParams): unknown { + const obj: any = {}; + if (message.storage?.length) { + obj.storage = message.storage.map((e) => StorageParams.toJSON(e)); + } + if (message.credentials !== undefined) { + obj.credentials = ImageCredentials.toJSON(message.credentials); + } + return obj; + }, + + create(base?: DeepPartial): ServiceParams { + return ServiceParams.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ServiceParams { + const message = createBaseServiceParams(); + message.storage = + object.storage?.map((e) => StorageParams.fromPartial(e)) || []; + message.credentials = + object.credentials !== undefined && object.credentials !== null + ? ImageCredentials.fromPartial(object.credentials) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(ServiceParams.$type, ServiceParams); + +function createBaseImageCredentials(): ImageCredentials { + return { + $type: 'akash.manifest.v2beta3.ImageCredentials', + host: '', + email: '', + username: '', + password: '', + }; +} + +export const ImageCredentials = { + $type: 'akash.manifest.v2beta3.ImageCredentials' as const, + + encode( + message: ImageCredentials, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.host !== '') { + writer.uint32(10).string(message.host); + } + if (message.email !== '') { + writer.uint32(18).string(message.email); + } + if (message.username !== '') { + writer.uint32(26).string(message.username); + } + if (message.password !== '') { + writer.uint32(34).string(message.password); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ImageCredentials { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseImageCredentials(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.host = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.email = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.username = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.password = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ImageCredentials { + return { + $type: ImageCredentials.$type, + host: isSet(object.host) ? globalThis.String(object.host) : '', + email: isSet(object.email) ? globalThis.String(object.email) : '', + username: isSet(object.username) + ? globalThis.String(object.username) + : '', + password: isSet(object.password) + ? globalThis.String(object.password) + : '', + }; + }, + + toJSON(message: ImageCredentials): unknown { + const obj: any = {}; + if (message.host !== '') { + obj.host = message.host; + } + if (message.email !== '') { + obj.email = message.email; + } + if (message.username !== '') { + obj.username = message.username; + } + if (message.password !== '') { + obj.password = message.password; + } + return obj; + }, + + create(base?: DeepPartial): ImageCredentials { + return ImageCredentials.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ImageCredentials { + const message = createBaseImageCredentials(); + message.host = object.host ?? ''; + message.email = object.email ?? ''; + message.username = object.username ?? ''; + message.password = object.password ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set(ImageCredentials.$type, ImageCredentials); + +function createBaseService(): Service { + return { + $type: 'akash.manifest.v2beta3.Service', + name: '', + image: '', + command: [], + args: [], + env: [], + resources: undefined, + count: 0, + expose: [], + params: undefined, + }; +} + +export const Service = { + $type: 'akash.manifest.v2beta3.Service' as const, + + encode( + message: Service, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.name !== '') { + writer.uint32(10).string(message.name); + } + if (message.image !== '') { + writer.uint32(18).string(message.image); + } + for (const v of message.command) { + writer.uint32(26).string(v!); + } + for (const v of message.args) { + writer.uint32(34).string(v!); + } + for (const v of message.env) { + writer.uint32(42).string(v!); + } + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(50).fork()).ldelim(); + } + if (message.count !== 0) { + writer.uint32(56).uint32(message.count); + } + for (const v of message.expose) { + ServiceExpose.encode(v!, writer.uint32(66).fork()).ldelim(); + } + if (message.params !== undefined) { + ServiceParams.encode(message.params, writer.uint32(74).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Service { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseService(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.name = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.image = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.command.push(reader.string()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.args.push(reader.string()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.env.push(reader.string()); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.resources = Resources.decode(reader, reader.uint32()); + continue; + case 7: + if (tag !== 56) { + break; + } + + message.count = reader.uint32(); + continue; + case 8: + if (tag !== 66) { + break; + } + + message.expose.push(ServiceExpose.decode(reader, reader.uint32())); + continue; + case 9: + if (tag !== 74) { + break; + } + + message.params = ServiceParams.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Service { + return { + $type: Service.$type, + name: isSet(object.name) ? globalThis.String(object.name) : '', + image: isSet(object.image) ? globalThis.String(object.image) : '', + command: globalThis.Array.isArray(object?.command) + ? object.command.map((e: any) => globalThis.String(e)) + : [], + args: globalThis.Array.isArray(object?.args) + ? object.args.map((e: any) => globalThis.String(e)) + : [], + env: globalThis.Array.isArray(object?.env) + ? object.env.map((e: any) => globalThis.String(e)) + : [], + resources: isSet(object.resources) + ? Resources.fromJSON(object.resources) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + expose: globalThis.Array.isArray(object?.expose) + ? 
object.expose.map((e: any) => ServiceExpose.fromJSON(e)) + : [], + params: isSet(object.params) + ? ServiceParams.fromJSON(object.params) + : undefined, + }; + }, + + toJSON(message: Service): unknown { + const obj: any = {}; + if (message.name !== '') { + obj.name = message.name; + } + if (message.image !== '') { + obj.image = message.image; + } + if (message.command?.length) { + obj.command = message.command; + } + if (message.args?.length) { + obj.args = message.args; + } + if (message.env?.length) { + obj.env = message.env; + } + if (message.resources !== undefined) { + obj.resources = Resources.toJSON(message.resources); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + if (message.expose?.length) { + obj.expose = message.expose.map((e) => ServiceExpose.toJSON(e)); + } + if (message.params !== undefined) { + obj.params = ServiceParams.toJSON(message.params); + } + return obj; + }, + + create(base?: DeepPartial): Service { + return Service.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Service { + const message = createBaseService(); + message.name = object.name ?? ''; + message.image = object.image ?? ''; + message.command = object.command?.map((e) => e) || []; + message.args = object.args?.map((e) => e) || []; + message.env = object.env?.map((e) => e) || []; + message.resources = + object.resources !== undefined && object.resources !== null + ? Resources.fromPartial(object.resources) + : undefined; + message.count = object.count ?? 0; + message.expose = + object.expose?.map((e) => ServiceExpose.fromPartial(e)) || []; + message.params = + object.params !== undefined && object.params !== null + ? ServiceParams.fromPartial(object.params) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Service.$type, Service); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? 
string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/manifest/v2beta3/serviceexpose.ts b/ts/src/generated/akash/manifest/v2beta3/serviceexpose.ts new file mode 100644 index 00000000..4fe6ebcc --- /dev/null +++ b/ts/src/generated/akash/manifest/v2beta3/serviceexpose.ts @@ -0,0 +1,269 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { ServiceExposeHTTPOptions } from './httpoptions'; + +/** ServiceExpose stores exposed ports and hosts details */ +export interface ServiceExpose { + $type: 'akash.manifest.v2beta3.ServiceExpose'; + /** port on the container */ + port: number; + /** port on the service definition */ + externalPort: number; + proto: string; + service: string; + global: boolean; + hosts: string[]; + httpOptions: ServiceExposeHTTPOptions | undefined; + /** The name of the IP address associated with this, if any */ + ip: string; + /** The sequence number of the associated endpoint in the on-chain data */ + endpointSequenceNumber: number; +} + +function createBaseServiceExpose(): ServiceExpose { + return { + $type: 'akash.manifest.v2beta3.ServiceExpose', + port: 0, + externalPort: 0, + proto: '', + service: '', + global: false, + hosts: [], + httpOptions: undefined, + ip: '', + endpointSequenceNumber: 0, + }; +} + +export const ServiceExpose = { + $type: 'akash.manifest.v2beta3.ServiceExpose' as const, + + encode( + message: ServiceExpose, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.port !== 0) { + writer.uint32(8).uint32(message.port); + } + if (message.externalPort !== 0) { 
+ writer.uint32(16).uint32(message.externalPort); + } + if (message.proto !== '') { + writer.uint32(26).string(message.proto); + } + if (message.service !== '') { + writer.uint32(34).string(message.service); + } + if (message.global !== false) { + writer.uint32(40).bool(message.global); + } + for (const v of message.hosts) { + writer.uint32(50).string(v!); + } + if (message.httpOptions !== undefined) { + ServiceExposeHTTPOptions.encode( + message.httpOptions, + writer.uint32(58).fork(), + ).ldelim(); + } + if (message.ip !== '') { + writer.uint32(66).string(message.ip); + } + if (message.endpointSequenceNumber !== 0) { + writer.uint32(72).uint32(message.endpointSequenceNumber); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ServiceExpose { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseServiceExpose(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 8) { + break; + } + + message.port = reader.uint32(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.externalPort = reader.uint32(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.proto = reader.string(); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.service = reader.string(); + continue; + case 5: + if (tag !== 40) { + break; + } + + message.global = reader.bool(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.hosts.push(reader.string()); + continue; + case 7: + if (tag !== 58) { + break; + } + + message.httpOptions = ServiceExposeHTTPOptions.decode( + reader, + reader.uint32(), + ); + continue; + case 8: + if (tag !== 66) { + break; + } + + message.ip = reader.string(); + continue; + case 9: + if (tag !== 72) { + break; + } + + message.endpointSequenceNumber = reader.uint32(); + continue; + } + if ((tag & 7) 
=== 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ServiceExpose { + return { + $type: ServiceExpose.$type, + port: isSet(object.port) ? globalThis.Number(object.port) : 0, + externalPort: isSet(object.externalPort) + ? globalThis.Number(object.externalPort) + : 0, + proto: isSet(object.proto) ? globalThis.String(object.proto) : '', + service: isSet(object.service) ? globalThis.String(object.service) : '', + global: isSet(object.global) ? globalThis.Boolean(object.global) : false, + hosts: globalThis.Array.isArray(object?.hosts) + ? object.hosts.map((e: any) => globalThis.String(e)) + : [], + httpOptions: isSet(object.httpOptions) + ? ServiceExposeHTTPOptions.fromJSON(object.httpOptions) + : undefined, + ip: isSet(object.ip) ? globalThis.String(object.ip) : '', + endpointSequenceNumber: isSet(object.endpointSequenceNumber) + ? globalThis.Number(object.endpointSequenceNumber) + : 0, + }; + }, + + toJSON(message: ServiceExpose): unknown { + const obj: any = {}; + if (message.port !== 0) { + obj.port = Math.round(message.port); + } + if (message.externalPort !== 0) { + obj.externalPort = Math.round(message.externalPort); + } + if (message.proto !== '') { + obj.proto = message.proto; + } + if (message.service !== '') { + obj.service = message.service; + } + if (message.global !== false) { + obj.global = message.global; + } + if (message.hosts?.length) { + obj.hosts = message.hosts; + } + if (message.httpOptions !== undefined) { + obj.httpOptions = ServiceExposeHTTPOptions.toJSON(message.httpOptions); + } + if (message.ip !== '') { + obj.ip = message.ip; + } + if (message.endpointSequenceNumber !== 0) { + obj.endpointSequenceNumber = Math.round(message.endpointSequenceNumber); + } + return obj; + }, + + create(base?: DeepPartial): ServiceExpose { + return ServiceExpose.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): ServiceExpose { + const message = createBaseServiceExpose(); + message.port = object.port ?? 0; + message.externalPort = object.externalPort ?? 0; + message.proto = object.proto ?? ''; + message.service = object.service ?? ''; + message.global = object.global ?? false; + message.hosts = object.hosts?.map((e) => e) || []; + message.httpOptions = + object.httpOptions !== undefined && object.httpOptions !== null + ? ServiceExposeHTTPOptions.fromPartial(object.httpOptions) + : undefined; + message.ip = object.ip ?? ''; + message.endpointSequenceNumber = object.endpointSequenceNumber ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ServiceExpose.$type, ServiceExpose); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/market/v1beta5/bid.ts b/ts/src/generated/akash/market/v1beta5/bid.ts new file mode 100644 index 00000000..6ffede2e --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/bid.ts @@ -0,0 +1,1030 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { Coin, DecCoin } from '../../../cosmos/base/v1beta1/coin'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Resources } from '../../base/resources/v1/resources'; +import { OrderID } from './order'; + +/** + * ResourceOffer describes resources that provider is offering + * for deployment + */ +export interface ResourceOffer { + $type: 'akash.market.v1beta5.ResourceOffer'; + resources: Resources | undefined; + count: number; +} + +/** MsgCreateBid defines an SDK message for creating Bid */ +export interface MsgCreateBid { + $type: 'akash.market.v1beta5.MsgCreateBid'; + order: OrderID | undefined; + provider: string; + price: DecCoin | undefined; + deposit: Coin | undefined; + resourcesOffer: ResourceOffer[]; +} + +/** MsgCreateBidResponse defines the Msg/CreateBid response type. */ +export interface MsgCreateBidResponse { + $type: 'akash.market.v1beta5.MsgCreateBidResponse'; +} + +/** MsgCloseBid defines an SDK message for closing bid */ +export interface MsgCloseBid { + $type: 'akash.market.v1beta5.MsgCloseBid'; + bidId: BidID | undefined; +} + +/** MsgCloseBidResponse defines the Msg/CloseBid response type. */ +export interface MsgCloseBidResponse { + $type: 'akash.market.v1beta5.MsgCloseBidResponse'; +} + +/** + * BidID stores owner and all other seq numbers + * A successful bid becomes a Lease(ID). 
+ */ +export interface BidID { + $type: 'akash.market.v1beta5.BidID'; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; +} + +/** Bid stores BidID, state of bid and price */ +export interface Bid { + $type: 'akash.market.v1beta5.Bid'; + bidId: BidID | undefined; + state: Bid_State; + price: DecCoin | undefined; + createdAt: Long; + resourcesOffer: ResourceOffer[]; +} + +/** State is an enum which refers to state of bid */ +export enum Bid_State { + /** invalid - Prefix should start with 0 in enum. So declaring dummy state */ + invalid = 0, + /** open - BidOpen denotes state for bid open */ + open = 1, + /** active - BidMatched denotes state for bid open */ + active = 2, + /** lost - BidLost denotes state for bid lost */ + lost = 3, + /** closed - BidClosed denotes state for bid closed */ + closed = 4, + UNRECOGNIZED = -1, +} + +export function bid_StateFromJSON(object: any): Bid_State { + switch (object) { + case 0: + case 'invalid': + return Bid_State.invalid; + case 1: + case 'open': + return Bid_State.open; + case 2: + case 'active': + return Bid_State.active; + case 3: + case 'lost': + return Bid_State.lost; + case 4: + case 'closed': + return Bid_State.closed; + case -1: + case 'UNRECOGNIZED': + default: + return Bid_State.UNRECOGNIZED; + } +} + +export function bid_StateToJSON(object: Bid_State): string { + switch (object) { + case Bid_State.invalid: + return 'invalid'; + case Bid_State.open: + return 'open'; + case Bid_State.active: + return 'active'; + case Bid_State.lost: + return 'lost'; + case Bid_State.closed: + return 'closed'; + case Bid_State.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +/** BidFilters defines flags for bid list filter */ +export interface BidFilters { + $type: 'akash.market.v1beta5.BidFilters'; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; + state: string; +} + +function createBaseResourceOffer(): ResourceOffer { + return { + $type: 
'akash.market.v1beta5.ResourceOffer', + resources: undefined, + count: 0, + }; +} + +export const ResourceOffer = { + $type: 'akash.market.v1beta5.ResourceOffer' as const, + + encode( + message: ResourceOffer, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.resources !== undefined) { + Resources.encode(message.resources, writer.uint32(10).fork()).ldelim(); + } + if (message.count !== 0) { + writer.uint32(16).uint32(message.count); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ResourceOffer { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseResourceOffer(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.resources = Resources.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.count = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ResourceOffer { + return { + $type: ResourceOffer.$type, + resources: isSet(object.resources) + ? Resources.fromJSON(object.resources) + : undefined, + count: isSet(object.count) ? globalThis.Number(object.count) : 0, + }; + }, + + toJSON(message: ResourceOffer): unknown { + const obj: any = {}; + if (message.resources !== undefined) { + obj.resources = Resources.toJSON(message.resources); + } + if (message.count !== 0) { + obj.count = Math.round(message.count); + } + return obj; + }, + + create(base?: DeepPartial): ResourceOffer { + return ResourceOffer.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ResourceOffer { + const message = createBaseResourceOffer(); + message.resources = + object.resources !== undefined && object.resources !== null + ? 
Resources.fromPartial(object.resources) + : undefined; + message.count = object.count ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(ResourceOffer.$type, ResourceOffer); + +function createBaseMsgCreateBid(): MsgCreateBid { + return { + $type: 'akash.market.v1beta5.MsgCreateBid', + order: undefined, + provider: '', + price: undefined, + deposit: undefined, + resourcesOffer: [], + }; +} + +export const MsgCreateBid = { + $type: 'akash.market.v1beta5.MsgCreateBid' as const, + + encode( + message: MsgCreateBid, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.order !== undefined) { + OrderID.encode(message.order, writer.uint32(10).fork()).ldelim(); + } + if (message.provider !== '') { + writer.uint32(18).string(message.provider); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); + } + if (message.deposit !== undefined) { + Coin.encode(message.deposit, writer.uint32(34).fork()).ldelim(); + } + for (const v of message.resourcesOffer) { + ResourceOffer.encode(v!, writer.uint32(42).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateBid { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCreateBid(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.order = OrderID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.provider = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.deposit = Coin.decode(reader, reader.uint32()); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.resourcesOffer.push( + ResourceOffer.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateBid { + return { + $type: MsgCreateBid.$type, + order: isSet(object.order) ? OrderID.fromJSON(object.order) : undefined, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : '', + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + deposit: isSet(object.deposit) + ? Coin.fromJSON(object.deposit) + : undefined, + resourcesOffer: globalThis.Array.isArray(object?.resourcesOffer) + ? 
object.resourcesOffer.map((e: any) => ResourceOffer.fromJSON(e)) + : [], + }; + }, + + toJSON(message: MsgCreateBid): unknown { + const obj: any = {}; + if (message.order !== undefined) { + obj.order = OrderID.toJSON(message.order); + } + if (message.provider !== '') { + obj.provider = message.provider; + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + if (message.deposit !== undefined) { + obj.deposit = Coin.toJSON(message.deposit); + } + if (message.resourcesOffer?.length) { + obj.resourcesOffer = message.resourcesOffer.map((e) => + ResourceOffer.toJSON(e), + ); + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateBid { + return MsgCreateBid.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCreateBid { + const message = createBaseMsgCreateBid(); + message.order = + object.order !== undefined && object.order !== null + ? OrderID.fromPartial(object.order) + : undefined; + message.provider = object.provider ?? ''; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + message.deposit = + object.deposit !== undefined && object.deposit !== null + ? Coin.fromPartial(object.deposit) + : undefined; + message.resourcesOffer = + object.resourcesOffer?.map((e) => ResourceOffer.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateBid.$type, MsgCreateBid); + +function createBaseMsgCreateBidResponse(): MsgCreateBidResponse { + return { $type: 'akash.market.v1beta5.MsgCreateBidResponse' }; +} + +export const MsgCreateBidResponse = { + $type: 'akash.market.v1beta5.MsgCreateBidResponse' as const, + + encode( + _: MsgCreateBidResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgCreateBidResponse { + const reader = + input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateBidResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateBidResponse { + return { $type: MsgCreateBidResponse.$type }; + }, + + toJSON(_: MsgCreateBidResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCreateBidResponse { + return MsgCreateBidResponse.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): MsgCreateBidResponse { + const message = createBaseMsgCreateBidResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateBidResponse.$type, MsgCreateBidResponse); + +function createBaseMsgCloseBid(): MsgCloseBid { + return { $type: 'akash.market.v1beta5.MsgCloseBid', bidId: undefined }; +} + +export const MsgCloseBid = { + $type: 'akash.market.v1beta5.MsgCloseBid' as const, + + encode( + message: MsgCloseBid, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.bidId !== undefined) { + BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBid { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseBid(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidId = BidID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseBid { + return { + $type: MsgCloseBid.$type, + bidId: isSet(object.bidId) ? 
BidID.fromJSON(object.bidId) : undefined, + }; + }, + + toJSON(message: MsgCloseBid): unknown { + const obj: any = {}; + if (message.bidId !== undefined) { + obj.bidId = BidID.toJSON(message.bidId); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseBid { + return MsgCloseBid.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCloseBid { + const message = createBaseMsgCloseBid(); + message.bidId = + object.bidId !== undefined && object.bidId !== null + ? BidID.fromPartial(object.bidId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseBid.$type, MsgCloseBid); + +function createBaseMsgCloseBidResponse(): MsgCloseBidResponse { + return { $type: 'akash.market.v1beta5.MsgCloseBidResponse' }; +} + +export const MsgCloseBidResponse = { + $type: 'akash.market.v1beta5.MsgCloseBidResponse' as const, + + encode( + _: MsgCloseBidResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseBidResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseBidResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseBidResponse { + return { $type: MsgCloseBidResponse.$type }; + }, + + toJSON(_: MsgCloseBidResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCloseBidResponse { + return MsgCloseBidResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgCloseBidResponse { + const message = createBaseMsgCloseBidResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseBidResponse.$type, MsgCloseBidResponse); + +function createBaseBidID(): BidID { + return { + $type: 'akash.market.v1beta5.BidID', + owner: '', + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: '', + }; +} + +export const BidID = { + $type: 'akash.market.v1beta5.BidID' as const, + + encode(message: BidID, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== '') { + writer.uint32(42).string(message.provider); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): BidID { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseBidID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): BidID { + return { + $type: BidID.$type, + owner: isSet(object.owner) ? 
globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : '', + }; + }, + + toJSON(message: BidID): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.provider !== '') { + obj.provider = message.provider; + } + return obj; + }, + + create(base?: DeepPartial): BidID { + return BidID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): BidID { + const message = createBaseBidID(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.provider = object.provider ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set(BidID.$type, BidID); + +function createBaseBid(): Bid { + return { + $type: 'akash.market.v1beta5.Bid', + bidId: undefined, + state: 0, + price: undefined, + createdAt: Long.ZERO, + resourcesOffer: [], + }; +} + +export const Bid = { + $type: 'akash.market.v1beta5.Bid' as const, + + encode(message: Bid, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.bidId !== undefined) { + BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt); + } + for (const v of message.resourcesOffer) { + ResourceOffer.encode(v!, writer.uint32(42).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Bid { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseBid(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidId = BidID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = reader.int64() as Long; + continue; + case 5: + if (tag !== 42) { + break; + } + + message.resourcesOffer.push( + ResourceOffer.decode(reader, reader.uint32()), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Bid { + return { + $type: Bid.$type, + bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, + state: isSet(object.state) ? bid_StateFromJSON(object.state) : 0, + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + resourcesOffer: globalThis.Array.isArray(object?.resourcesOffer) + ? 
object.resourcesOffer.map((e: any) => ResourceOffer.fromJSON(e)) + : [], + }; + }, + + toJSON(message: Bid): unknown { + const obj: any = {}; + if (message.bidId !== undefined) { + obj.bidId = BidID.toJSON(message.bidId); + } + if (message.state !== 0) { + obj.state = bid_StateToJSON(message.state); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + if (message.resourcesOffer?.length) { + obj.resourcesOffer = message.resourcesOffer.map((e) => + ResourceOffer.toJSON(e), + ); + } + return obj; + }, + + create(base?: DeepPartial): Bid { + return Bid.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Bid { + const message = createBaseBid(); + message.bidId = + object.bidId !== undefined && object.bidId !== null + ? BidID.fromPartial(object.bidId) + : undefined; + message.state = object.state ?? 0; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? 
Long.fromValue(object.createdAt) + : Long.ZERO; + message.resourcesOffer = + object.resourcesOffer?.map((e) => ResourceOffer.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(Bid.$type, Bid); + +function createBaseBidFilters(): BidFilters { + return { + $type: 'akash.market.v1beta5.BidFilters', + owner: '', + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: '', + state: '', + }; +} + +export const BidFilters = { + $type: 'akash.market.v1beta5.BidFilters' as const, + + encode( + message: BidFilters, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== '') { + writer.uint32(42).string(message.provider); + } + if (message.state !== '') { + writer.uint32(50).string(message.state); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): BidFilters { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseBidFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): BidFilters { + return { + $type: BidFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : '', + state: isSet(object.state) ? globalThis.String(object.state) : '', + }; + }, + + toJSON(message: BidFilters): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.provider !== '') { + obj.provider = message.provider; + } + if (message.state !== '') { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): BidFilters { + return BidFilters.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): BidFilters { + const message = createBaseBidFilters(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.provider = object.provider ?? ''; + message.state = object.state ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(BidFilters.$type, BidFilters); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/market/v1beta5/genesis.ts b/ts/src/generated/akash/market/v1beta5/genesis.ts new file mode 100644 index 00000000..b16fdd95 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/genesis.ts @@ -0,0 +1,175 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Bid } from './bid'; +import { Lease } from './lease'; +import { Order } from './order'; +import { Params } from './params'; + +/** GenesisState defines the basic genesis state used by market module */ +export interface GenesisState { + $type: 'akash.market.v1beta5.GenesisState'; + params: Params | undefined; + orders: Order[]; + leases: Lease[]; + bids: Bid[]; +} + +function createBaseGenesisState(): GenesisState { + return { + $type: 'akash.market.v1beta5.GenesisState', + params: undefined, + orders: [], + leases: [], + bids: [], + }; +} + +export const 
GenesisState = { + $type: 'akash.market.v1beta5.GenesisState' as const, + + encode( + message: GenesisState, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.params !== undefined) { + Params.encode(message.params, writer.uint32(10).fork()).ldelim(); + } + for (const v of message.orders) { + Order.encode(v!, writer.uint32(18).fork()).ldelim(); + } + for (const v of message.leases) { + Lease.encode(v!, writer.uint32(26).fork()).ldelim(); + } + for (const v of message.bids) { + Bid.encode(v!, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.params = Params.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.orders.push(Order.decode(reader, reader.uint32())); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.leases.push(Lease.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.bids.push(Bid.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + params: isSet(object.params) ? Params.fromJSON(object.params) : undefined, + orders: globalThis.Array.isArray(object?.orders) + ? object.orders.map((e: any) => Order.fromJSON(e)) + : [], + leases: globalThis.Array.isArray(object?.leases) + ? object.leases.map((e: any) => Lease.fromJSON(e)) + : [], + bids: globalThis.Array.isArray(object?.bids) + ? 
object.bids.map((e: any) => Bid.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.params !== undefined) { + obj.params = Params.toJSON(message.params); + } + if (message.orders?.length) { + obj.orders = message.orders.map((e) => Order.toJSON(e)); + } + if (message.leases?.length) { + obj.leases = message.leases.map((e) => Lease.toJSON(e)); + } + if (message.bids?.length) { + obj.bids = message.bids.map((e) => Bid.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.params = + object.params !== undefined && object.params !== null + ? Params.fromPartial(object.params) + : undefined; + message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; + message.leases = object.leases?.map((e) => Lease.fromPartial(e)) || []; + message.bids = object.bids?.map((e) => Bid.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/market/v1beta5/lease.ts b/ts/src/generated/akash/market/v1beta5/lease.ts new file mode 100644 index 00000000..e9060982 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/lease.ts @@ -0,0 +1,980 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { DecCoin } from '../../../cosmos/base/v1beta1/coin'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { BidID } from './bid'; + +/** LeaseID stores bid details of lease */ +export interface LeaseID { + $type: 'akash.market.v1beta5.LeaseID'; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; +} + +/** Lease stores LeaseID, state of lease and price */ +export interface Lease { + $type: 'akash.market.v1beta5.Lease'; + leaseId: LeaseID | undefined; + state: Lease_State; + price: DecCoin | undefined; + createdAt: Long; + closedOn: Long; +} + +/** State is an enum which refers to state of lease */ +export enum Lease_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** active - LeaseActive denotes state for lease active */ + active = 1, + /** insufficient_funds - LeaseInsufficientFunds denotes state for lease insufficient_funds */ + insufficient_funds = 2, + /** closed - LeaseClosed denotes state for lease closed */ + closed = 3, + UNRECOGNIZED = -1, +} + +export function lease_StateFromJSON(object: any): Lease_State { + switch (object) { + case 0: + case 'invalid': + return Lease_State.invalid; + case 1: + case 'active': + return Lease_State.active; + case 2: + case 'insufficient_funds': + return Lease_State.insufficient_funds; + case 3: + case 'closed': + return Lease_State.closed; + case -1: + case 'UNRECOGNIZED': + default: + return Lease_State.UNRECOGNIZED; + } +} + +export function lease_StateToJSON(object: Lease_State): string { + switch (object) { + case Lease_State.invalid: + return 'invalid'; + case Lease_State.active: + return 'active'; + case Lease_State.insufficient_funds: + return 'insufficient_funds'; + case Lease_State.closed: + return 'closed'; + case Lease_State.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +/** LeaseFilters defines flags for lease list filter */ +export interface LeaseFilters { + $type: 'akash.market.v1beta5.LeaseFilters'; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + provider: string; + state: string; +} + +/** MsgCreateLease is sent to create a lease */ +export interface MsgCreateLease { + $type: 'akash.market.v1beta5.MsgCreateLease'; + bidId: BidID | undefined; +} + +/** MsgCreateLeaseResponse is the response from creating a lease */ +export interface MsgCreateLeaseResponse { + $type: 'akash.market.v1beta5.MsgCreateLeaseResponse'; +} + +/** MsgWithdrawLease defines an SDK message for closing bid */ +export interface MsgWithdrawLease { + $type: 'akash.market.v1beta5.MsgWithdrawLease'; + bidId: LeaseID | undefined; +} + +/** MsgWithdrawLeaseResponse defines the Msg/WithdrawLease response type. 
*/ +export interface MsgWithdrawLeaseResponse { + $type: 'akash.market.v1beta5.MsgWithdrawLeaseResponse'; +} + +/** MsgCloseLease defines an SDK message for closing order */ +export interface MsgCloseLease { + $type: 'akash.market.v1beta5.MsgCloseLease'; + leaseId: LeaseID | undefined; +} + +/** MsgCloseLeaseResponse defines the Msg/CloseLease response type. */ +export interface MsgCloseLeaseResponse { + $type: 'akash.market.v1beta5.MsgCloseLeaseResponse'; +} + +function createBaseLeaseID(): LeaseID { + return { + $type: 'akash.market.v1beta5.LeaseID', + owner: '', + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: '', + }; +} + +export const LeaseID = { + $type: 'akash.market.v1beta5.LeaseID' as const, + + encode( + message: LeaseID, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== '') { + writer.uint32(42).string(message.provider); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LeaseID { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseLeaseID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): LeaseID { + return { + $type: LeaseID.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : '', + }; + }, + + toJSON(message: LeaseID): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.provider !== '') { + obj.provider = message.provider; + } + return obj; + }, + + create(base?: DeepPartial): LeaseID { + return LeaseID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): LeaseID { + const message = createBaseLeaseID(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 
0; + message.oseq = object.oseq ?? 0; + message.provider = object.provider ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(LeaseID.$type, LeaseID); + +function createBaseLease(): Lease { + return { + $type: 'akash.market.v1beta5.Lease', + leaseId: undefined, + state: 0, + price: undefined, + createdAt: Long.ZERO, + closedOn: Long.ZERO, + }; +} + +export const Lease = { + $type: 'akash.market.v1beta5.Lease' as const, + + encode(message: Lease, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.leaseId !== undefined) { + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.price !== undefined) { + DecCoin.encode(message.price, writer.uint32(26).fork()).ldelim(); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt); + } + if (!message.closedOn.equals(Long.ZERO)) { + writer.uint32(40).int64(message.closedOn); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Lease { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.leaseId = LeaseID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.price = DecCoin.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = reader.int64() as Long; + continue; + case 5: + if (tag !== 40) { + break; + } + + message.closedOn = reader.int64() as Long; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Lease { + return { + $type: Lease.$type, + leaseId: isSet(object.leaseId) + ? LeaseID.fromJSON(object.leaseId) + : undefined, + state: isSet(object.state) ? lease_StateFromJSON(object.state) : 0, + price: isSet(object.price) ? DecCoin.fromJSON(object.price) : undefined, + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + closedOn: isSet(object.closedOn) + ? Long.fromValue(object.closedOn) + : Long.ZERO, + }; + }, + + toJSON(message: Lease): unknown { + const obj: any = {}; + if (message.leaseId !== undefined) { + obj.leaseId = LeaseID.toJSON(message.leaseId); + } + if (message.state !== 0) { + obj.state = lease_StateToJSON(message.state); + } + if (message.price !== undefined) { + obj.price = DecCoin.toJSON(message.price); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + if (!message.closedOn.equals(Long.ZERO)) { + obj.closedOn = (message.closedOn || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Lease { + return Lease.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): Lease { + const message = createBaseLease(); + message.leaseId = + object.leaseId !== undefined && object.leaseId !== null + ? LeaseID.fromPartial(object.leaseId) + : undefined; + message.state = object.state ?? 0; + message.price = + object.price !== undefined && object.price !== null + ? DecCoin.fromPartial(object.price) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? Long.fromValue(object.createdAt) + : Long.ZERO; + message.closedOn = + object.closedOn !== undefined && object.closedOn !== null + ? Long.fromValue(object.closedOn) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Lease.$type, Lease); + +function createBaseLeaseFilters(): LeaseFilters { + return { + $type: 'akash.market.v1beta5.LeaseFilters', + owner: '', + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + provider: '', + state: '', + }; +} + +export const LeaseFilters = { + $type: 'akash.market.v1beta5.LeaseFilters' as const, + + encode( + message: LeaseFilters, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.provider !== '') { + writer.uint32(42).string(message.provider); + } + if (message.state !== '') { + writer.uint32(50).string(message.state); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): LeaseFilters { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseLeaseFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.provider = reader.string(); + continue; + case 6: + if (tag !== 50) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): LeaseFilters { + return { + $type: LeaseFilters.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + provider: isSet(object.provider) + ? globalThis.String(object.provider) + : '', + state: isSet(object.state) ? globalThis.String(object.state) : '', + }; + }, + + toJSON(message: LeaseFilters): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.provider !== '') { + obj.provider = message.provider; + } + if (message.state !== '') { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): LeaseFilters { + return LeaseFilters.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): LeaseFilters { + const message = createBaseLeaseFilters(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.provider = object.provider ?? ''; + message.state = object.state ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(LeaseFilters.$type, LeaseFilters); + +function createBaseMsgCreateLease(): MsgCreateLease { + return { $type: 'akash.market.v1beta5.MsgCreateLease', bidId: undefined }; +} + +export const MsgCreateLease = { + $type: 'akash.market.v1beta5.MsgCreateLease' as const, + + encode( + message: MsgCreateLease, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.bidId !== undefined) { + BidID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateLease { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidId = BidID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateLease { + return { + $type: MsgCreateLease.$type, + bidId: isSet(object.bidId) ? BidID.fromJSON(object.bidId) : undefined, + }; + }, + + toJSON(message: MsgCreateLease): unknown { + const obj: any = {}; + if (message.bidId !== undefined) { + obj.bidId = BidID.toJSON(message.bidId); + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateLease { + return MsgCreateLease.fromPartial(base ?? 
{}); + }, + fromPartial(object: DeepPartial): MsgCreateLease { + const message = createBaseMsgCreateLease(); + message.bidId = + object.bidId !== undefined && object.bidId !== null + ? BidID.fromPartial(object.bidId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateLease.$type, MsgCreateLease); + +function createBaseMsgCreateLeaseResponse(): MsgCreateLeaseResponse { + return { $type: 'akash.market.v1beta5.MsgCreateLeaseResponse' }; +} + +export const MsgCreateLeaseResponse = { + $type: 'akash.market.v1beta5.MsgCreateLeaseResponse' as const, + + encode( + _: MsgCreateLeaseResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgCreateLeaseResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateLeaseResponse { + return { $type: MsgCreateLeaseResponse.$type }; + }, + + toJSON(_: MsgCreateLeaseResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCreateLeaseResponse { + return MsgCreateLeaseResponse.fromPartial(base ?? 
{}); + }, + fromPartial(_: DeepPartial): MsgCreateLeaseResponse { + const message = createBaseMsgCreateLeaseResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateLeaseResponse.$type, MsgCreateLeaseResponse); + +function createBaseMsgWithdrawLease(): MsgWithdrawLease { + return { $type: 'akash.market.v1beta5.MsgWithdrawLease', bidId: undefined }; +} + +export const MsgWithdrawLease = { + $type: 'akash.market.v1beta5.MsgWithdrawLease' as const, + + encode( + message: MsgWithdrawLease, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.bidId !== undefined) { + LeaseID.encode(message.bidId, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgWithdrawLease { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgWithdrawLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidId = LeaseID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgWithdrawLease { + return { + $type: MsgWithdrawLease.$type, + bidId: isSet(object.bidId) ? LeaseID.fromJSON(object.bidId) : undefined, + }; + }, + + toJSON(message: MsgWithdrawLease): unknown { + const obj: any = {}; + if (message.bidId !== undefined) { + obj.bidId = LeaseID.toJSON(message.bidId); + } + return obj; + }, + + create(base?: DeepPartial): MsgWithdrawLease { + return MsgWithdrawLease.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgWithdrawLease { + const message = createBaseMsgWithdrawLease(); + message.bidId = + object.bidId !== undefined && object.bidId !== null + ? 
LeaseID.fromPartial(object.bidId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgWithdrawLease.$type, MsgWithdrawLease); + +function createBaseMsgWithdrawLeaseResponse(): MsgWithdrawLeaseResponse { + return { $type: 'akash.market.v1beta5.MsgWithdrawLeaseResponse' }; +} + +export const MsgWithdrawLeaseResponse = { + $type: 'akash.market.v1beta5.MsgWithdrawLeaseResponse' as const, + + encode( + _: MsgWithdrawLeaseResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgWithdrawLeaseResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgWithdrawLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgWithdrawLeaseResponse { + return { $type: MsgWithdrawLeaseResponse.$type }; + }, + + toJSON(_: MsgWithdrawLeaseResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgWithdrawLeaseResponse { + return MsgWithdrawLeaseResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgWithdrawLeaseResponse { + const message = createBaseMsgWithdrawLeaseResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgWithdrawLeaseResponse.$type, + MsgWithdrawLeaseResponse, +); + +function createBaseMsgCloseLease(): MsgCloseLease { + return { $type: 'akash.market.v1beta5.MsgCloseLease', leaseId: undefined }; +} + +export const MsgCloseLease = { + $type: 'akash.market.v1beta5.MsgCloseLease' as const, + + encode( + message: MsgCloseLease, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.leaseId !== undefined) { + LeaseID.encode(message.leaseId, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCloseLease { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseLease(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.leaseId = LeaseID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCloseLease { + return { + $type: MsgCloseLease.$type, + leaseId: isSet(object.leaseId) + ? LeaseID.fromJSON(object.leaseId) + : undefined, + }; + }, + + toJSON(message: MsgCloseLease): unknown { + const obj: any = {}; + if (message.leaseId !== undefined) { + obj.leaseId = LeaseID.toJSON(message.leaseId); + } + return obj; + }, + + create(base?: DeepPartial): MsgCloseLease { + return MsgCloseLease.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCloseLease { + const message = createBaseMsgCloseLease(); + message.leaseId = + object.leaseId !== undefined && object.leaseId !== null + ? 
LeaseID.fromPartial(object.leaseId) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseLease.$type, MsgCloseLease); + +function createBaseMsgCloseLeaseResponse(): MsgCloseLeaseResponse { + return { $type: 'akash.market.v1beta5.MsgCloseLeaseResponse' }; +} + +export const MsgCloseLeaseResponse = { + $type: 'akash.market.v1beta5.MsgCloseLeaseResponse' as const, + + encode( + _: MsgCloseLeaseResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgCloseLeaseResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCloseLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCloseLeaseResponse { + return { $type: MsgCloseLeaseResponse.$type }; + }, + + toJSON(_: MsgCloseLeaseResponse): unknown { + const obj: any = {}; + return obj; + }, + + create(base?: DeepPartial): MsgCloseLeaseResponse { + return MsgCloseLeaseResponse.fromPartial(base ?? {}); + }, + fromPartial(_: DeepPartial): MsgCloseLeaseResponse { + const message = createBaseMsgCloseLeaseResponse(); + return message; + }, +}; + +messageTypeRegistry.set(MsgCloseLeaseResponse.$type, MsgCloseLeaseResponse); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/market/v1beta5/order.ts b/ts/src/generated/akash/market/v1beta5/order.ts new file mode 100644 index 00000000..c4110dd6 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/order.ts @@ -0,0 +1,502 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { GroupSpec } from '../../deployment/v1beta4/groupspec'; + +/** OrderID stores owner and all other seq numbers */ +export interface OrderID { + $type: 'akash.market.v1beta5.OrderID'; + owner: string; + dseq: Long; + gseq: number; + oseq: number; +} + +/** Order stores orderID, state of order and other details */ +export interface Order { + $type: 'akash.market.v1beta5.Order'; + orderId: OrderID | undefined; + state: Order_State; + spec: GroupSpec | undefined; + createdAt: Long; +} + +/** State is an enum which refers to state of order */ +export enum Order_State { + /** invalid - Prefix should start with 0 in enum. 
So declaring dummy state */ + invalid = 0, + /** open - OrderOpen denotes state for order open */ + open = 1, + /** active - OrderMatched denotes state for order matched */ + active = 2, + /** closed - OrderClosed denotes state for order lost */ + closed = 3, + UNRECOGNIZED = -1, +} + +export function order_StateFromJSON(object: any): Order_State { + switch (object) { + case 0: + case 'invalid': + return Order_State.invalid; + case 1: + case 'open': + return Order_State.open; + case 2: + case 'active': + return Order_State.active; + case 3: + case 'closed': + return Order_State.closed; + case -1: + case 'UNRECOGNIZED': + default: + return Order_State.UNRECOGNIZED; + } +} + +export function order_StateToJSON(object: Order_State): string { + switch (object) { + case Order_State.invalid: + return 'invalid'; + case Order_State.open: + return 'open'; + case Order_State.active: + return 'active'; + case Order_State.closed: + return 'closed'; + case Order_State.UNRECOGNIZED: + default: + return 'UNRECOGNIZED'; + } +} + +/** OrderFilters defines flags for order list filter */ +export interface OrderFilters { + $type: 'akash.market.v1beta5.OrderFilters'; + owner: string; + dseq: Long; + gseq: number; + oseq: number; + state: string; +} + +function createBaseOrderID(): OrderID { + return { + $type: 'akash.market.v1beta5.OrderID', + owner: '', + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + }; +} + +export const OrderID = { + $type: 'akash.market.v1beta5.OrderID' as const, + + encode( + message: OrderID, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OrderID { + const reader = 
+ input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseOrderID(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): OrderID { + return { + $type: OrderID.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + }; + }, + + toJSON(message: OrderID): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + return obj; + }, + + create(base?: DeepPartial): OrderID { + return OrderID.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): OrderID { + const message = createBaseOrderID(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 
0; + return message; + }, +}; + +messageTypeRegistry.set(OrderID.$type, OrderID); + +function createBaseOrder(): Order { + return { + $type: 'akash.market.v1beta5.Order', + orderId: undefined, + state: 0, + spec: undefined, + createdAt: Long.ZERO, + }; +} + +export const Order = { + $type: 'akash.market.v1beta5.Order' as const, + + encode(message: Order, writer: _m0.Writer = _m0.Writer.create()): _m0.Writer { + if (message.orderId !== undefined) { + OrderID.encode(message.orderId, writer.uint32(10).fork()).ldelim(); + } + if (message.state !== 0) { + writer.uint32(16).int32(message.state); + } + if (message.spec !== undefined) { + GroupSpec.encode(message.spec, writer.uint32(26).fork()).ldelim(); + } + if (!message.createdAt.equals(Long.ZERO)) { + writer.uint32(32).int64(message.createdAt); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Order { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseOrder(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.orderId = OrderID.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.state = reader.int32() as any; + continue; + case 3: + if (tag !== 26) { + break; + } + + message.spec = GroupSpec.decode(reader, reader.uint32()); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.createdAt = reader.int64() as Long; + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Order { + return { + $type: Order.$type, + orderId: isSet(object.orderId) + ? OrderID.fromJSON(object.orderId) + : undefined, + state: isSet(object.state) ? order_StateFromJSON(object.state) : 0, + spec: isSet(object.spec) ? 
GroupSpec.fromJSON(object.spec) : undefined, + createdAt: isSet(object.createdAt) + ? Long.fromValue(object.createdAt) + : Long.ZERO, + }; + }, + + toJSON(message: Order): unknown { + const obj: any = {}; + if (message.orderId !== undefined) { + obj.orderId = OrderID.toJSON(message.orderId); + } + if (message.state !== 0) { + obj.state = order_StateToJSON(message.state); + } + if (message.spec !== undefined) { + obj.spec = GroupSpec.toJSON(message.spec); + } + if (!message.createdAt.equals(Long.ZERO)) { + obj.createdAt = (message.createdAt || Long.ZERO).toString(); + } + return obj; + }, + + create(base?: DeepPartial): Order { + return Order.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Order { + const message = createBaseOrder(); + message.orderId = + object.orderId !== undefined && object.orderId !== null + ? OrderID.fromPartial(object.orderId) + : undefined; + message.state = object.state ?? 0; + message.spec = + object.spec !== undefined && object.spec !== null + ? GroupSpec.fromPartial(object.spec) + : undefined; + message.createdAt = + object.createdAt !== undefined && object.createdAt !== null + ? 
Long.fromValue(object.createdAt) + : Long.ZERO; + return message; + }, +}; + +messageTypeRegistry.set(Order.$type, Order); + +function createBaseOrderFilters(): OrderFilters { + return { + $type: 'akash.market.v1beta5.OrderFilters', + owner: '', + dseq: Long.UZERO, + gseq: 0, + oseq: 0, + state: '', + }; +} + +export const OrderFilters = { + $type: 'akash.market.v1beta5.OrderFilters' as const, + + encode( + message: OrderFilters, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (!message.dseq.equals(Long.UZERO)) { + writer.uint32(16).uint64(message.dseq); + } + if (message.gseq !== 0) { + writer.uint32(24).uint32(message.gseq); + } + if (message.oseq !== 0) { + writer.uint32(32).uint32(message.oseq); + } + if (message.state !== '') { + writer.uint32(42).string(message.state); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): OrderFilters { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseOrderFilters(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.dseq = reader.uint64() as Long; + continue; + case 3: + if (tag !== 24) { + break; + } + + message.gseq = reader.uint32(); + continue; + case 4: + if (tag !== 32) { + break; + } + + message.oseq = reader.uint32(); + continue; + case 5: + if (tag !== 42) { + break; + } + + message.state = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): OrderFilters { + return { + $type: OrderFilters.$type, + owner: isSet(object.owner) ? 
globalThis.String(object.owner) : '', + dseq: isSet(object.dseq) ? Long.fromValue(object.dseq) : Long.UZERO, + gseq: isSet(object.gseq) ? globalThis.Number(object.gseq) : 0, + oseq: isSet(object.oseq) ? globalThis.Number(object.oseq) : 0, + state: isSet(object.state) ? globalThis.String(object.state) : '', + }; + }, + + toJSON(message: OrderFilters): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (!message.dseq.equals(Long.UZERO)) { + obj.dseq = (message.dseq || Long.UZERO).toString(); + } + if (message.gseq !== 0) { + obj.gseq = Math.round(message.gseq); + } + if (message.oseq !== 0) { + obj.oseq = Math.round(message.oseq); + } + if (message.state !== '') { + obj.state = message.state; + } + return obj; + }, + + create(base?: DeepPartial): OrderFilters { + return OrderFilters.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): OrderFilters { + const message = createBaseOrderFilters(); + message.owner = object.owner ?? ''; + message.dseq = + object.dseq !== undefined && object.dseq !== null + ? Long.fromValue(object.dseq) + : Long.UZERO; + message.gseq = object.gseq ?? 0; + message.oseq = object.oseq ?? 0; + message.state = object.state ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(OrderFilters.$type, OrderFilters); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/market/v1beta5/params.ts b/ts/src/generated/akash/market/v1beta5/params.ts new file mode 100644 index 00000000..4b996a35 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/params.ts @@ -0,0 +1,136 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { Coin } from '../../../cosmos/base/v1beta1/coin'; +import { messageTypeRegistry } from '../../../typeRegistry'; + +/** Params is the params for the x/market module */ +export interface Params { + $type: 'akash.market.v1beta5.Params'; + bidMinDeposit: Coin | undefined; + orderMaxBids: number; +} + +function createBaseParams(): Params { + return { + $type: 'akash.market.v1beta5.Params', + bidMinDeposit: undefined, + orderMaxBids: 0, + }; +} + +export const Params = { + $type: 'akash.market.v1beta5.Params' as const, + + encode( + message: Params, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.bidMinDeposit !== undefined) { + Coin.encode(message.bidMinDeposit, writer.uint32(10).fork()).ldelim(); + } + if (message.orderMaxBids !== 0) { + writer.uint32(16).uint32(message.orderMaxBids); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Params { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseParams(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bidMinDeposit = Coin.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 16) { + break; + } + + message.orderMaxBids = reader.uint32(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Params { + return { + $type: Params.$type, + bidMinDeposit: isSet(object.bidMinDeposit) + ? Coin.fromJSON(object.bidMinDeposit) + : undefined, + orderMaxBids: isSet(object.orderMaxBids) + ? globalThis.Number(object.orderMaxBids) + : 0, + }; + }, + + toJSON(message: Params): unknown { + const obj: any = {}; + if (message.bidMinDeposit !== undefined) { + obj.bidMinDeposit = Coin.toJSON(message.bidMinDeposit); + } + if (message.orderMaxBids !== 0) { + obj.orderMaxBids = Math.round(message.orderMaxBids); + } + return obj; + }, + + create(base?: DeepPartial): Params { + return Params.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Params { + const message = createBaseParams(); + message.bidMinDeposit = + object.bidMinDeposit !== undefined && object.bidMinDeposit !== null + ? Coin.fromPartial(object.bidMinDeposit) + : undefined; + message.orderMaxBids = object.orderMaxBids ?? 0; + return message; + }, +}; + +messageTypeRegistry.set(Params.$type, Params); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/market/v1beta5/query.ts b/ts/src/generated/akash/market/v1beta5/query.ts new file mode 100644 index 00000000..dd751223 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/query.ts @@ -0,0 +1,1275 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { + PageRequest, + PageResponse, +} from '../../../cosmos/base/query/v1beta1/pagination'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Account, FractionalPayment } from '../../escrow/v1beta3/types'; +import { Bid, BidFilters, BidID } from './bid'; +import { Lease, LeaseFilters, LeaseID } from './lease'; +import { Order, OrderFilters, OrderID } from './order'; + +/** QueryOrdersRequest is request type for the Query/Orders RPC method */ +export interface QueryOrdersRequest { + $type: 'akash.market.v1beta5.QueryOrdersRequest'; + filters: OrderFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryOrdersResponse is response type for the Query/Orders RPC method */ +export interface QueryOrdersResponse { + $type: 'akash.market.v1beta5.QueryOrdersResponse'; + orders: Order[]; + pagination: PageResponse | undefined; +} + +/** QueryOrderRequest is request type for the Query/Order RPC method */ +export interface QueryOrderRequest { + $type: 'akash.market.v1beta5.QueryOrderRequest'; + id: OrderID | undefined; +} + +/** QueryOrderResponse is response type for the Query/Order RPC method */ +export interface QueryOrderResponse { + $type: 'akash.market.v1beta5.QueryOrderResponse'; + order: Order | undefined; +} + +/** QueryBidsRequest is request type for the Query/Bids RPC method */ +export interface QueryBidsRequest { + $type: 'akash.market.v1beta5.QueryBidsRequest'; + filters: 
BidFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryBidsResponse is response type for the Query/Bids RPC method */ +export interface QueryBidsResponse { + $type: 'akash.market.v1beta5.QueryBidsResponse'; + bids: QueryBidResponse[]; + pagination: PageResponse | undefined; +} + +/** QueryBidRequest is request type for the Query/Bid RPC method */ +export interface QueryBidRequest { + $type: 'akash.market.v1beta5.QueryBidRequest'; + id: BidID | undefined; +} + +/** QueryBidResponse is response type for the Query/Bid RPC method */ +export interface QueryBidResponse { + $type: 'akash.market.v1beta5.QueryBidResponse'; + bid: Bid | undefined; + escrowAccount: Account | undefined; +} + +/** QueryLeasesRequest is request type for the Query/Leases RPC method */ +export interface QueryLeasesRequest { + $type: 'akash.market.v1beta5.QueryLeasesRequest'; + filters: LeaseFilters | undefined; + pagination: PageRequest | undefined; +} + +/** QueryLeasesResponse is response type for the Query/Leases RPC method */ +export interface QueryLeasesResponse { + $type: 'akash.market.v1beta5.QueryLeasesResponse'; + leases: QueryLeaseResponse[]; + pagination: PageResponse | undefined; +} + +/** QueryLeaseRequest is request type for the Query/Lease RPC method */ +export interface QueryLeaseRequest { + $type: 'akash.market.v1beta5.QueryLeaseRequest'; + id: LeaseID | undefined; +} + +/** QueryLeaseResponse is response type for the Query/Lease RPC method */ +export interface QueryLeaseResponse { + $type: 'akash.market.v1beta5.QueryLeaseResponse'; + lease: Lease | undefined; + escrowPayment: FractionalPayment | undefined; +} + +function createBaseQueryOrdersRequest(): QueryOrdersRequest { + return { + $type: 'akash.market.v1beta5.QueryOrdersRequest', + filters: undefined, + pagination: undefined, + }; +} + +export const QueryOrdersRequest = { + $type: 'akash.market.v1beta5.QueryOrdersRequest' as const, + + encode( + message: QueryOrdersRequest, + writer: _m0.Writer = 
_m0.Writer.create(), + ): _m0.Writer { + if (message.filters !== undefined) { + OrderFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryOrdersRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = OrderFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrdersRequest { + return { + $type: QueryOrdersRequest.$type, + filters: isSet(object.filters) + ? OrderFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryOrdersRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = OrderFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrdersRequest { + return QueryOrdersRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrdersRequest { + const message = createBaseQueryOrdersRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? 
OrderFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrdersRequest.$type, QueryOrdersRequest); + +function createBaseQueryOrdersResponse(): QueryOrdersResponse { + return { + $type: 'akash.market.v1beta5.QueryOrdersResponse', + orders: [], + pagination: undefined, + }; +} + +export const QueryOrdersResponse = { + $type: 'akash.market.v1beta5.QueryOrdersResponse' as const, + + encode( + message: QueryOrdersResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.orders) { + Order.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork(), + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrdersResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryOrdersResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.orders.push(Order.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrdersResponse { + return { + $type: QueryOrdersResponse.$type, + orders: globalThis.Array.isArray(object?.orders) + ? object.orders.map((e: any) => Order.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? 
PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryOrdersResponse): unknown { + const obj: any = {}; + if (message.orders?.length) { + obj.orders = message.orders.map((e) => Order.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrdersResponse { + return QueryOrdersResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrdersResponse { + const message = createBaseQueryOrdersResponse(); + message.orders = object.orders?.map((e) => Order.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrdersResponse.$type, QueryOrdersResponse); + +function createBaseQueryOrderRequest(): QueryOrderRequest { + return { $type: 'akash.market.v1beta5.QueryOrderRequest', id: undefined }; +} + +export const QueryOrderRequest = { + $type: 'akash.market.v1beta5.QueryOrderRequest' as const, + + encode( + message: QueryOrderRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + OrderID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryOrderRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = OrderID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrderRequest { + return { + $type: QueryOrderRequest.$type, + id: isSet(object.id) ? OrderID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryOrderRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = OrderID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrderRequest { + return QueryOrderRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrderRequest { + const message = createBaseQueryOrderRequest(); + message.id = + object.id !== undefined && object.id !== null + ? OrderID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrderRequest.$type, QueryOrderRequest); + +function createBaseQueryOrderResponse(): QueryOrderResponse { + return { $type: 'akash.market.v1beta5.QueryOrderResponse', order: undefined }; +} + +export const QueryOrderResponse = { + $type: 'akash.market.v1beta5.QueryOrderResponse' as const, + + encode( + message: QueryOrderResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.order !== undefined) { + Order.encode(message.order, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryOrderResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryOrderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.order = Order.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryOrderResponse { + return { + $type: QueryOrderResponse.$type, + order: isSet(object.order) ? Order.fromJSON(object.order) : undefined, + }; + }, + + toJSON(message: QueryOrderResponse): unknown { + const obj: any = {}; + if (message.order !== undefined) { + obj.order = Order.toJSON(message.order); + } + return obj; + }, + + create(base?: DeepPartial): QueryOrderResponse { + return QueryOrderResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryOrderResponse { + const message = createBaseQueryOrderResponse(); + message.order = + object.order !== undefined && object.order !== null + ? Order.fromPartial(object.order) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryOrderResponse.$type, QueryOrderResponse); + +function createBaseQueryBidsRequest(): QueryBidsRequest { + return { + $type: 'akash.market.v1beta5.QueryBidsRequest', + filters: undefined, + pagination: undefined, + }; +} + +export const QueryBidsRequest = { + $type: 'akash.market.v1beta5.QueryBidsRequest' as const, + + encode( + message: QueryBidsRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.filters !== undefined) { + BidFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsRequest { + const reader = + input instanceof _m0.Reader ? 
input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryBidsRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = BidFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidsRequest { + return { + $type: QueryBidsRequest.$type, + filters: isSet(object.filters) + ? BidFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryBidsRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = BidFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidsRequest { + return QueryBidsRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidsRequest { + const message = createBaseQueryBidsRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? BidFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidsRequest.$type, QueryBidsRequest); + +function createBaseQueryBidsResponse(): QueryBidsResponse { + return { + $type: 'akash.market.v1beta5.QueryBidsResponse', + bids: [], + pagination: undefined, + }; +} + +export const QueryBidsResponse = { + $type: 'akash.market.v1beta5.QueryBidsResponse' as const, + + encode( + message: QueryBidsResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.bids) { + QueryBidResponse.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork(), + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidsResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryBidsResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bids.push(QueryBidResponse.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidsResponse { + return { + $type: QueryBidsResponse.$type, + bids: globalThis.Array.isArray(object?.bids) + ? object.bids.map((e: any) => QueryBidResponse.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? 
PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryBidsResponse): unknown { + const obj: any = {}; + if (message.bids?.length) { + obj.bids = message.bids.map((e) => QueryBidResponse.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidsResponse { + return QueryBidsResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidsResponse { + const message = createBaseQueryBidsResponse(); + message.bids = + object.bids?.map((e) => QueryBidResponse.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidsResponse.$type, QueryBidsResponse); + +function createBaseQueryBidRequest(): QueryBidRequest { + return { $type: 'akash.market.v1beta5.QueryBidRequest', id: undefined }; +} + +export const QueryBidRequest = { + $type: 'akash.market.v1beta5.QueryBidRequest' as const, + + encode( + message: QueryBidRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + BidID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryBidRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = BidID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidRequest { + return { + $type: QueryBidRequest.$type, + id: isSet(object.id) ? BidID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryBidRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = BidID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidRequest { + return QueryBidRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidRequest { + const message = createBaseQueryBidRequest(); + message.id = + object.id !== undefined && object.id !== null + ? BidID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidRequest.$type, QueryBidRequest); + +function createBaseQueryBidResponse(): QueryBidResponse { + return { + $type: 'akash.market.v1beta5.QueryBidResponse', + bid: undefined, + escrowAccount: undefined, + }; +} + +export const QueryBidResponse = { + $type: 'akash.market.v1beta5.QueryBidResponse' as const, + + encode( + message: QueryBidResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.bid !== undefined) { + Bid.encode(message.bid, writer.uint32(10).fork()).ldelim(); + } + if (message.escrowAccount !== undefined) { + Account.encode(message.escrowAccount, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryBidResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryBidResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.bid = Bid.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.escrowAccount = Account.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryBidResponse { + return { + $type: QueryBidResponse.$type, + bid: isSet(object.bid) ? Bid.fromJSON(object.bid) : undefined, + escrowAccount: isSet(object.escrowAccount) + ? Account.fromJSON(object.escrowAccount) + : undefined, + }; + }, + + toJSON(message: QueryBidResponse): unknown { + const obj: any = {}; + if (message.bid !== undefined) { + obj.bid = Bid.toJSON(message.bid); + } + if (message.escrowAccount !== undefined) { + obj.escrowAccount = Account.toJSON(message.escrowAccount); + } + return obj; + }, + + create(base?: DeepPartial): QueryBidResponse { + return QueryBidResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryBidResponse { + const message = createBaseQueryBidResponse(); + message.bid = + object.bid !== undefined && object.bid !== null + ? Bid.fromPartial(object.bid) + : undefined; + message.escrowAccount = + object.escrowAccount !== undefined && object.escrowAccount !== null + ? 
Account.fromPartial(object.escrowAccount) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryBidResponse.$type, QueryBidResponse); + +function createBaseQueryLeasesRequest(): QueryLeasesRequest { + return { + $type: 'akash.market.v1beta5.QueryLeasesRequest', + filters: undefined, + pagination: undefined, + }; +} + +export const QueryLeasesRequest = { + $type: 'akash.market.v1beta5.QueryLeasesRequest' as const, + + encode( + message: QueryLeasesRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.filters !== undefined) { + LeaseFilters.encode(message.filters, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(18).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryLeasesRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.filters = LeaseFilters.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeasesRequest { + return { + $type: QueryLeasesRequest.$type, + filters: isSet(object.filters) + ? LeaseFilters.fromJSON(object.filters) + : undefined, + pagination: isSet(object.pagination) + ? 
PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryLeasesRequest): unknown { + const obj: any = {}; + if (message.filters !== undefined) { + obj.filters = LeaseFilters.toJSON(message.filters); + } + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeasesRequest { + return QueryLeasesRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryLeasesRequest { + const message = createBaseQueryLeasesRequest(); + message.filters = + object.filters !== undefined && object.filters !== null + ? LeaseFilters.fromPartial(object.filters) + : undefined; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeasesRequest.$type, QueryLeasesRequest); + +function createBaseQueryLeasesResponse(): QueryLeasesResponse { + return { + $type: 'akash.market.v1beta5.QueryLeasesResponse', + leases: [], + pagination: undefined, + }; +} + +export const QueryLeasesResponse = { + $type: 'akash.market.v1beta5.QueryLeasesResponse' as const, + + encode( + message: QueryLeasesResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.leases) { + QueryLeaseResponse.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork(), + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeasesResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryLeasesResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.leases.push( + QueryLeaseResponse.decode(reader, reader.uint32()), + ); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeasesResponse { + return { + $type: QueryLeasesResponse.$type, + leases: globalThis.Array.isArray(object?.leases) + ? object.leases.map((e: any) => QueryLeaseResponse.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryLeasesResponse): unknown { + const obj: any = {}; + if (message.leases?.length) { + obj.leases = message.leases.map((e) => QueryLeaseResponse.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeasesResponse { + return QueryLeasesResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryLeasesResponse { + const message = createBaseQueryLeasesResponse(); + message.leases = + object.leases?.map((e) => QueryLeaseResponse.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? 
PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeasesResponse.$type, QueryLeasesResponse); + +function createBaseQueryLeaseRequest(): QueryLeaseRequest { + return { $type: 'akash.market.v1beta5.QueryLeaseRequest', id: undefined }; +} + +export const QueryLeaseRequest = { + $type: 'akash.market.v1beta5.QueryLeaseRequest' as const, + + encode( + message: QueryLeaseRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.id !== undefined) { + LeaseID.encode(message.id, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryLeaseRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.id = LeaseID.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeaseRequest { + return { + $type: QueryLeaseRequest.$type, + id: isSet(object.id) ? LeaseID.fromJSON(object.id) : undefined, + }; + }, + + toJSON(message: QueryLeaseRequest): unknown { + const obj: any = {}; + if (message.id !== undefined) { + obj.id = LeaseID.toJSON(message.id); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeaseRequest { + return QueryLeaseRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryLeaseRequest { + const message = createBaseQueryLeaseRequest(); + message.id = + object.id !== undefined && object.id !== null + ? 
LeaseID.fromPartial(object.id) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeaseRequest.$type, QueryLeaseRequest); + +function createBaseQueryLeaseResponse(): QueryLeaseResponse { + return { + $type: 'akash.market.v1beta5.QueryLeaseResponse', + lease: undefined, + escrowPayment: undefined, + }; +} + +export const QueryLeaseResponse = { + $type: 'akash.market.v1beta5.QueryLeaseResponse' as const, + + encode( + message: QueryLeaseResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.lease !== undefined) { + Lease.encode(message.lease, writer.uint32(10).fork()).ldelim(); + } + if (message.escrowPayment !== undefined) { + FractionalPayment.encode( + message.escrowPayment, + writer.uint32(18).fork(), + ).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): QueryLeaseResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryLeaseResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.lease = Lease.decode(reader, reader.uint32()); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.escrowPayment = FractionalPayment.decode( + reader, + reader.uint32(), + ); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryLeaseResponse { + return { + $type: QueryLeaseResponse.$type, + lease: isSet(object.lease) ? Lease.fromJSON(object.lease) : undefined, + escrowPayment: isSet(object.escrowPayment) + ? 
FractionalPayment.fromJSON(object.escrowPayment) + : undefined, + }; + }, + + toJSON(message: QueryLeaseResponse): unknown { + const obj: any = {}; + if (message.lease !== undefined) { + obj.lease = Lease.toJSON(message.lease); + } + if (message.escrowPayment !== undefined) { + obj.escrowPayment = FractionalPayment.toJSON(message.escrowPayment); + } + return obj; + }, + + create(base?: DeepPartial): QueryLeaseResponse { + return QueryLeaseResponse.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryLeaseResponse { + const message = createBaseQueryLeaseResponse(); + message.lease = + object.lease !== undefined && object.lease !== null + ? Lease.fromPartial(object.lease) + : undefined; + message.escrowPayment = + object.escrowPayment !== undefined && object.escrowPayment !== null + ? FractionalPayment.fromPartial(object.escrowPayment) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryLeaseResponse.$type, QueryLeaseResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Orders queries orders with filters */ + Orders(request: QueryOrdersRequest): Promise; + /** Order queries order details */ + Order(request: QueryOrderRequest): Promise; + /** Bids queries bids with filters */ + Bids(request: QueryBidsRequest): Promise; + /** Bid queries bid details */ + Bid(request: QueryBidRequest): Promise; + /** Leases queries leases with filters */ + Leases(request: QueryLeasesRequest): Promise; + /** Lease queries lease details */ + Lease(request: QueryLeaseRequest): Promise; +} + +export const QueryServiceName = 'akash.market.v1beta5.Query'; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Orders = this.Orders.bind(this); + this.Order = this.Order.bind(this); + this.Bids = this.Bids.bind(this); + this.Bid = 
this.Bid.bind(this); + this.Leases = this.Leases.bind(this); + this.Lease = this.Lease.bind(this); + } + Orders(request: QueryOrdersRequest): Promise { + const data = QueryOrdersRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Orders', data); + return promise.then((data) => + QueryOrdersResponse.decode(_m0.Reader.create(data)), + ); + } + + Order(request: QueryOrderRequest): Promise { + const data = QueryOrderRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Order', data); + return promise.then((data) => + QueryOrderResponse.decode(_m0.Reader.create(data)), + ); + } + + Bids(request: QueryBidsRequest): Promise { + const data = QueryBidsRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Bids', data); + return promise.then((data) => + QueryBidsResponse.decode(_m0.Reader.create(data)), + ); + } + + Bid(request: QueryBidRequest): Promise { + const data = QueryBidRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Bid', data); + return promise.then((data) => + QueryBidResponse.decode(_m0.Reader.create(data)), + ); + } + + Leases(request: QueryLeasesRequest): Promise { + const data = QueryLeasesRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Leases', data); + return promise.then((data) => + QueryLeasesResponse.decode(_m0.Reader.create(data)), + ); + } + + Lease(request: QueryLeaseRequest): Promise { + const data = QueryLeaseRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Lease', data); + return promise.then((data) => + QueryLeaseResponse.decode(_m0.Reader.create(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? 
string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/market/v1beta5/service.grpc-js.ts b/ts/src/generated/akash/market/v1beta5/service.grpc-js.ts new file mode 100644 index 00000000..a1efbda5 --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/service.grpc-js.ts @@ -0,0 +1,252 @@ +/* eslint-disable */ +import { + ChannelCredentials, + Client, + makeGenericClientConstructor, + Metadata, +} from '@grpc/grpc-js'; +import type { + CallOptions, + ClientOptions, + ClientUnaryCall, + handleUnaryCall, + ServiceError, + UntypedServiceImplementation, +} from '@grpc/grpc-js'; +import { + MsgCloseBid, + MsgCloseBidResponse, + MsgCreateBid, + MsgCreateBidResponse, +} from './bid'; +import { + MsgCloseLease, + MsgCloseLeaseResponse, + MsgCreateLease, + MsgCreateLeaseResponse, + MsgWithdrawLease, + MsgWithdrawLeaseResponse, +} from './lease'; + +export const protobufPackage = 'akash.market.v1beta5'; + +/** Msg defines the market Msg service */ +export type MsgService = typeof MsgService; +export const MsgService = { + /** CreateBid defines a method to create a bid given proper inputs. */ + createBid: { + path: '/akash.market.v1beta5.Msg/CreateBid', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateBid) => + Buffer.from(MsgCreateBid.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateBid.decode(value), + responseSerialize: (value: MsgCreateBidResponse) => + Buffer.from(MsgCreateBidResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCreateBidResponse.decode(value), + }, + /** CloseBid defines a method to close a bid given proper inputs. 
*/ + closeBid: { + path: '/akash.market.v1beta5.Msg/CloseBid', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseBid) => + Buffer.from(MsgCloseBid.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseBid.decode(value), + responseSerialize: (value: MsgCloseBidResponse) => + Buffer.from(MsgCloseBidResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCloseBidResponse.decode(value), + }, + /** WithdrawLease withdraws accrued funds from the lease payment */ + withdrawLease: { + path: '/akash.market.v1beta5.Msg/WithdrawLease', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgWithdrawLease) => + Buffer.from(MsgWithdrawLease.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgWithdrawLease.decode(value), + responseSerialize: (value: MsgWithdrawLeaseResponse) => + Buffer.from(MsgWithdrawLeaseResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgWithdrawLeaseResponse.decode(value), + }, + /** CreateLease creates a new lease */ + createLease: { + path: '/akash.market.v1beta5.Msg/CreateLease', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCreateLease) => + Buffer.from(MsgCreateLease.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCreateLease.decode(value), + responseSerialize: (value: MsgCreateLeaseResponse) => + Buffer.from(MsgCreateLeaseResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => + MsgCreateLeaseResponse.decode(value), + }, + /** CloseLease defines a method to close an order given proper inputs. 
*/ + closeLease: { + path: '/akash.market.v1beta5.Msg/CloseLease', + requestStream: false, + responseStream: false, + requestSerialize: (value: MsgCloseLease) => + Buffer.from(MsgCloseLease.encode(value).finish()), + requestDeserialize: (value: Buffer) => MsgCloseLease.decode(value), + responseSerialize: (value: MsgCloseLeaseResponse) => + Buffer.from(MsgCloseLeaseResponse.encode(value).finish()), + responseDeserialize: (value: Buffer) => MsgCloseLeaseResponse.decode(value), + }, +} as const; + +export interface MsgServer extends UntypedServiceImplementation { + /** CreateBid defines a method to create a bid given proper inputs. */ + createBid: handleUnaryCall; + /** CloseBid defines a method to close a bid given proper inputs. */ + closeBid: handleUnaryCall; + /** WithdrawLease withdraws accrued funds from the lease payment */ + withdrawLease: handleUnaryCall; + /** CreateLease creates a new lease */ + createLease: handleUnaryCall; + /** CloseLease defines a method to close an order given proper inputs. */ + closeLease: handleUnaryCall; +} + +export interface MsgClient extends Client { + /** CreateBid defines a method to create a bid given proper inputs. */ + createBid( + request: MsgCreateBid, + callback: ( + error: ServiceError | null, + response: MsgCreateBidResponse, + ) => void, + ): ClientUnaryCall; + createBid( + request: MsgCreateBid, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateBidResponse, + ) => void, + ): ClientUnaryCall; + createBid( + request: MsgCreateBid, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateBidResponse, + ) => void, + ): ClientUnaryCall; + /** CloseBid defines a method to close a bid given proper inputs. 
*/ + closeBid( + request: MsgCloseBid, + callback: ( + error: ServiceError | null, + response: MsgCloseBidResponse, + ) => void, + ): ClientUnaryCall; + closeBid( + request: MsgCloseBid, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseBidResponse, + ) => void, + ): ClientUnaryCall; + closeBid( + request: MsgCloseBid, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseBidResponse, + ) => void, + ): ClientUnaryCall; + /** WithdrawLease withdraws accrued funds from the lease payment */ + withdrawLease( + request: MsgWithdrawLease, + callback: ( + error: ServiceError | null, + response: MsgWithdrawLeaseResponse, + ) => void, + ): ClientUnaryCall; + withdrawLease( + request: MsgWithdrawLease, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgWithdrawLeaseResponse, + ) => void, + ): ClientUnaryCall; + withdrawLease( + request: MsgWithdrawLease, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgWithdrawLeaseResponse, + ) => void, + ): ClientUnaryCall; + /** CreateLease creates a new lease */ + createLease( + request: MsgCreateLease, + callback: ( + error: ServiceError | null, + response: MsgCreateLeaseResponse, + ) => void, + ): ClientUnaryCall; + createLease( + request: MsgCreateLease, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCreateLeaseResponse, + ) => void, + ): ClientUnaryCall; + createLease( + request: MsgCreateLease, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCreateLeaseResponse, + ) => void, + ): ClientUnaryCall; + /** CloseLease defines a method to close an order given proper inputs. 
*/ + closeLease( + request: MsgCloseLease, + callback: ( + error: ServiceError | null, + response: MsgCloseLeaseResponse, + ) => void, + ): ClientUnaryCall; + closeLease( + request: MsgCloseLease, + metadata: Metadata, + callback: ( + error: ServiceError | null, + response: MsgCloseLeaseResponse, + ) => void, + ): ClientUnaryCall; + closeLease( + request: MsgCloseLease, + metadata: Metadata, + options: Partial, + callback: ( + error: ServiceError | null, + response: MsgCloseLeaseResponse, + ) => void, + ): ClientUnaryCall; +} + +export const MsgClient = makeGenericClientConstructor( + MsgService, + 'akash.market.v1beta5.Msg', +) as unknown as { + new ( + address: string, + credentials: ChannelCredentials, + options?: Partial, + ): MsgClient; + service: typeof MsgService; + serviceName: string; +}; diff --git a/ts/src/generated/akash/market/v1beta5/service.ts b/ts/src/generated/akash/market/v1beta5/service.ts new file mode 100644 index 00000000..93ce407a --- /dev/null +++ b/ts/src/generated/akash/market/v1beta5/service.ts @@ -0,0 +1,92 @@ +/* eslint-disable */ +import _m0 from 'protobufjs/minimal'; +import { + MsgCloseBid, + MsgCloseBidResponse, + MsgCreateBid, + MsgCreateBidResponse, +} from './bid'; +import { + MsgCloseLease, + MsgCloseLeaseResponse, + MsgCreateLease, + MsgCreateLeaseResponse, + MsgWithdrawLease, + MsgWithdrawLeaseResponse, +} from './lease'; + +/** Msg defines the market Msg service */ +export interface Msg { + /** CreateBid defines a method to create a bid given proper inputs. */ + CreateBid(request: MsgCreateBid): Promise; + /** CloseBid defines a method to close a bid given proper inputs. */ + CloseBid(request: MsgCloseBid): Promise; + /** WithdrawLease withdraws accrued funds from the lease payment */ + WithdrawLease(request: MsgWithdrawLease): Promise; + /** CreateLease creates a new lease */ + CreateLease(request: MsgCreateLease): Promise; + /** CloseLease defines a method to close an order given proper inputs. 
*/ + CloseLease(request: MsgCloseLease): Promise; +} + +export const MsgServiceName = 'akash.market.v1beta5.Msg'; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.CreateBid = this.CreateBid.bind(this); + this.CloseBid = this.CloseBid.bind(this); + this.WithdrawLease = this.WithdrawLease.bind(this); + this.CreateLease = this.CreateLease.bind(this); + this.CloseLease = this.CloseLease.bind(this); + } + CreateBid(request: MsgCreateBid): Promise { + const data = MsgCreateBid.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CreateBid', data); + return promise.then((data) => + MsgCreateBidResponse.decode(_m0.Reader.create(data)), + ); + } + + CloseBid(request: MsgCloseBid): Promise { + const data = MsgCloseBid.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CloseBid', data); + return promise.then((data) => + MsgCloseBidResponse.decode(_m0.Reader.create(data)), + ); + } + + WithdrawLease(request: MsgWithdrawLease): Promise { + const data = MsgWithdrawLease.encode(request).finish(); + const promise = this.rpc.request(this.service, 'WithdrawLease', data); + return promise.then((data) => + MsgWithdrawLeaseResponse.decode(_m0.Reader.create(data)), + ); + } + + CreateLease(request: MsgCreateLease): Promise { + const data = MsgCreateLease.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CreateLease', data); + return promise.then((data) => + MsgCreateLeaseResponse.decode(_m0.Reader.create(data)), + ); + } + + CloseLease(request: MsgCloseLease): Promise { + const data = MsgCloseLease.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CloseLease', data); + return promise.then((data) => + MsgCloseLeaseResponse.decode(_m0.Reader.create(data)), + ); + } +} + +interface Rpc { + request( + 
service: string, + method: string, + data: Uint8Array, + ): Promise; +} diff --git a/ts/src/generated/akash/provider/v1beta4/genesis.ts b/ts/src/generated/akash/provider/v1beta4/genesis.ts new file mode 100644 index 00000000..6397b8a4 --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/genesis.ts @@ -0,0 +1,108 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Provider } from './provider'; + +/** GenesisState defines the basic genesis state used by provider module */ +export interface GenesisState { + $type: 'akash.provider.v1beta4.GenesisState'; + providers: Provider[]; +} + +function createBaseGenesisState(): GenesisState { + return { $type: 'akash.provider.v1beta4.GenesisState', providers: [] }; +} + +export const GenesisState = { + $type: 'akash.provider.v1beta4.GenesisState' as const, + + encode( + message: GenesisState, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.providers) { + Provider.encode(v!, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): GenesisState { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseGenesisState(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.providers.push(Provider.decode(reader, reader.uint32())); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): GenesisState { + return { + $type: GenesisState.$type, + providers: globalThis.Array.isArray(object?.providers) + ? 
object.providers.map((e: any) => Provider.fromJSON(e)) + : [], + }; + }, + + toJSON(message: GenesisState): unknown { + const obj: any = {}; + if (message.providers?.length) { + obj.providers = message.providers.map((e) => Provider.toJSON(e)); + } + return obj; + }, + + create(base?: DeepPartial): GenesisState { + return GenesisState.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): GenesisState { + const message = createBaseGenesisState(); + message.providers = + object.providers?.map((e) => Provider.fromPartial(e)) || []; + return message; + }, +}; + +messageTypeRegistry.set(GenesisState.$type, GenesisState); + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} diff --git a/ts/src/generated/akash/provider/v1beta4/provider.ts b/ts/src/generated/akash/provider/v1beta4/provider.ts new file mode 100644 index 00000000..599a7914 --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/provider.ts @@ -0,0 +1,869 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Attribute } from '../../base/attributes/v1/attribute'; + +/** ProviderInfo */ +export interface ProviderInfo { + $type: 'akash.provider.v1beta4.ProviderInfo'; + email: string; + website: string; +} + +/** MsgCreateProvider defines an SDK message for creating a provider */ +export interface MsgCreateProvider { + $type: 'akash.provider.v1beta4.MsgCreateProvider'; + owner: string; + hostUri: string; + attributes: Attribute[]; + info: ProviderInfo | undefined; +} + +/** MsgCreateProviderResponse 
defines the Msg/CreateProvider response type. */ +export interface MsgCreateProviderResponse { + $type: 'akash.provider.v1beta4.MsgCreateProviderResponse'; +} + +/** MsgUpdateProvider defines an SDK message for updating a provider */ +export interface MsgUpdateProvider { + $type: 'akash.provider.v1beta4.MsgUpdateProvider'; + owner: string; + hostUri: string; + attributes: Attribute[]; + info: ProviderInfo | undefined; +} + +/** MsgUpdateProviderResponse defines the Msg/UpdateProvider response type. */ +export interface MsgUpdateProviderResponse { + $type: 'akash.provider.v1beta4.MsgUpdateProviderResponse'; +} + +/** MsgDeleteProvider defines an SDK message for deleting a provider */ +export interface MsgDeleteProvider { + $type: 'akash.provider.v1beta4.MsgDeleteProvider'; + owner: string; +} + +/** MsgDeleteProviderResponse defines the Msg/DeleteProvider response type. */ +export interface MsgDeleteProviderResponse { + $type: 'akash.provider.v1beta4.MsgDeleteProviderResponse'; +} + +/** Provider stores owner and host details */ +export interface Provider { + $type: 'akash.provider.v1beta4.Provider'; + owner: string; + hostUri: string; + attributes: Attribute[]; + info: ProviderInfo | undefined; +} + +function createBaseProviderInfo(): ProviderInfo { + return { + $type: 'akash.provider.v1beta4.ProviderInfo', + email: '', + website: '', + }; +} + +export const ProviderInfo = { + $type: 'akash.provider.v1beta4.ProviderInfo' as const, + + encode( + message: ProviderInfo, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.email !== '') { + writer.uint32(10).string(message.email); + } + if (message.website !== '') { + writer.uint32(18).string(message.website); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): ProviderInfo { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseProviderInfo(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.email = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.website = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): ProviderInfo { + return { + $type: ProviderInfo.$type, + email: isSet(object.email) ? globalThis.String(object.email) : '', + website: isSet(object.website) ? globalThis.String(object.website) : '', + }; + }, + + toJSON(message: ProviderInfo): unknown { + const obj: any = {}; + if (message.email !== '') { + obj.email = message.email; + } + if (message.website !== '') { + obj.website = message.website; + } + return obj; + }, + + create(base?: DeepPartial): ProviderInfo { + return ProviderInfo.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): ProviderInfo { + const message = createBaseProviderInfo(); + message.email = object.email ?? ''; + message.website = object.website ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set(ProviderInfo.$type, ProviderInfo); + +function createBaseMsgCreateProvider(): MsgCreateProvider { + return { + $type: 'akash.provider.v1beta4.MsgCreateProvider', + owner: '', + hostUri: '', + attributes: [], + info: undefined, + }; +} + +export const MsgCreateProvider = { + $type: 'akash.provider.v1beta4.MsgCreateProvider' as const, + + encode( + message: MsgCreateProvider, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.hostUri !== '') { + writer.uint32(18).string(message.hostUri); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); + } + if (message.info !== undefined) { + ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgCreateProvider { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgCreateProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hostUri = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.info = ProviderInfo.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgCreateProvider { + return { + $type: MsgCreateProvider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + hostUri: isSet(object.hostUri) ? 
globalThis.String(object.hostUri) : '', + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, + }; + }, + + toJSON(message: MsgCreateProvider): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.hostUri !== '') { + obj.hostUri = message.hostUri; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + if (message.info !== undefined) { + obj.info = ProviderInfo.toJSON(message.info); + } + return obj; + }, + + create(base?: DeepPartial): MsgCreateProvider { + return MsgCreateProvider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgCreateProvider { + const message = createBaseMsgCreateProvider(); + message.owner = object.owner ?? ''; + message.hostUri = object.hostUri ?? ''; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + message.info = + object.info !== undefined && object.info !== null + ? ProviderInfo.fromPartial(object.info) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgCreateProvider.$type, MsgCreateProvider); + +function createBaseMsgCreateProviderResponse(): MsgCreateProviderResponse { + return { $type: 'akash.provider.v1beta4.MsgCreateProviderResponse' }; +} + +export const MsgCreateProviderResponse = { + $type: 'akash.provider.v1beta4.MsgCreateProviderResponse' as const, + + encode( + _: MsgCreateProviderResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgCreateProviderResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgCreateProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgCreateProviderResponse { + return { $type: MsgCreateProviderResponse.$type }; + }, + + toJSON(_: MsgCreateProviderResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgCreateProviderResponse { + return MsgCreateProviderResponse.fromPartial(base ?? {}); + }, + fromPartial( + _: DeepPartial, + ): MsgCreateProviderResponse { + const message = createBaseMsgCreateProviderResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgCreateProviderResponse.$type, + MsgCreateProviderResponse, +); + +function createBaseMsgUpdateProvider(): MsgUpdateProvider { + return { + $type: 'akash.provider.v1beta4.MsgUpdateProvider', + owner: '', + hostUri: '', + attributes: [], + info: undefined, + }; +} + +export const MsgUpdateProvider = { + $type: 'akash.provider.v1beta4.MsgUpdateProvider' as const, + + encode( + message: MsgUpdateProvider, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.hostUri !== '') { + writer.uint32(18).string(message.hostUri); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); + } + if (message.info !== undefined) { + ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgUpdateProvider { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseMsgUpdateProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hostUri = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.info = ProviderInfo.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgUpdateProvider { + return { + $type: MsgUpdateProvider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : '', + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, + }; + }, + + toJSON(message: MsgUpdateProvider): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.hostUri !== '') { + obj.hostUri = message.hostUri; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + if (message.info !== undefined) { + obj.info = ProviderInfo.toJSON(message.info); + } + return obj; + }, + + create(base?: DeepPartial): MsgUpdateProvider { + return MsgUpdateProvider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgUpdateProvider { + const message = createBaseMsgUpdateProvider(); + message.owner = object.owner ?? ''; + message.hostUri = object.hostUri ?? 
''; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + message.info = + object.info !== undefined && object.info !== null + ? ProviderInfo.fromPartial(object.info) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(MsgUpdateProvider.$type, MsgUpdateProvider); + +function createBaseMsgUpdateProviderResponse(): MsgUpdateProviderResponse { + return { $type: 'akash.provider.v1beta4.MsgUpdateProviderResponse' }; +} + +export const MsgUpdateProviderResponse = { + $type: 'akash.provider.v1beta4.MsgUpdateProviderResponse' as const, + + encode( + _: MsgUpdateProviderResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgUpdateProviderResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgUpdateProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgUpdateProviderResponse { + return { $type: MsgUpdateProviderResponse.$type }; + }, + + toJSON(_: MsgUpdateProviderResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgUpdateProviderResponse { + return MsgUpdateProviderResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgUpdateProviderResponse { + const message = createBaseMsgUpdateProviderResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgUpdateProviderResponse.$type, + MsgUpdateProviderResponse, +); + +function createBaseMsgDeleteProvider(): MsgDeleteProvider { + return { $type: 'akash.provider.v1beta4.MsgDeleteProvider', owner: '' }; +} + +export const MsgDeleteProvider = { + $type: 'akash.provider.v1beta4.MsgDeleteProvider' as const, + + encode( + message: MsgDeleteProvider, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): MsgDeleteProvider { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDeleteProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): MsgDeleteProvider { + return { + $type: MsgDeleteProvider.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + }; + }, + + toJSON(message: MsgDeleteProvider): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): MsgDeleteProvider { + return MsgDeleteProvider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): MsgDeleteProvider { + const message = createBaseMsgDeleteProvider(); + message.owner = object.owner ?? 
''; + return message; + }, +}; + +messageTypeRegistry.set(MsgDeleteProvider.$type, MsgDeleteProvider); + +function createBaseMsgDeleteProviderResponse(): MsgDeleteProviderResponse { + return { $type: 'akash.provider.v1beta4.MsgDeleteProviderResponse' }; +} + +export const MsgDeleteProviderResponse = { + $type: 'akash.provider.v1beta4.MsgDeleteProviderResponse' as const, + + encode( + _: MsgDeleteProviderResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): MsgDeleteProviderResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseMsgDeleteProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(_: any): MsgDeleteProviderResponse { + return { $type: MsgDeleteProviderResponse.$type }; + }, + + toJSON(_: MsgDeleteProviderResponse): unknown { + const obj: any = {}; + return obj; + }, + + create( + base?: DeepPartial, + ): MsgDeleteProviderResponse { + return MsgDeleteProviderResponse.fromPartial(base ?? 
{}); + }, + fromPartial( + _: DeepPartial, + ): MsgDeleteProviderResponse { + const message = createBaseMsgDeleteProviderResponse(); + return message; + }, +}; + +messageTypeRegistry.set( + MsgDeleteProviderResponse.$type, + MsgDeleteProviderResponse, +); + +function createBaseProvider(): Provider { + return { + $type: 'akash.provider.v1beta4.Provider', + owner: '', + hostUri: '', + attributes: [], + info: undefined, + }; +} + +export const Provider = { + $type: 'akash.provider.v1beta4.Provider' as const, + + encode( + message: Provider, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + if (message.hostUri !== '') { + writer.uint32(18).string(message.hostUri); + } + for (const v of message.attributes) { + Attribute.encode(v!, writer.uint32(26).fork()).ldelim(); + } + if (message.info !== undefined) { + ProviderInfo.encode(message.info, writer.uint32(34).fork()).ldelim(); + } + return writer; + }, + + decode(input: _m0.Reader | Uint8Array, length?: number): Provider { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseProvider(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.hostUri = reader.string(); + continue; + case 3: + if (tag !== 26) { + break; + } + + message.attributes.push(Attribute.decode(reader, reader.uint32())); + continue; + case 4: + if (tag !== 34) { + break; + } + + message.info = ProviderInfo.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): Provider { + return { + $type: Provider.$type, + owner: isSet(object.owner) ? 
globalThis.String(object.owner) : '', + hostUri: isSet(object.hostUri) ? globalThis.String(object.hostUri) : '', + attributes: globalThis.Array.isArray(object?.attributes) + ? object.attributes.map((e: any) => Attribute.fromJSON(e)) + : [], + info: isSet(object.info) ? ProviderInfo.fromJSON(object.info) : undefined, + }; + }, + + toJSON(message: Provider): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + if (message.hostUri !== '') { + obj.hostUri = message.hostUri; + } + if (message.attributes?.length) { + obj.attributes = message.attributes.map((e) => Attribute.toJSON(e)); + } + if (message.info !== undefined) { + obj.info = ProviderInfo.toJSON(message.info); + } + return obj; + }, + + create(base?: DeepPartial): Provider { + return Provider.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): Provider { + const message = createBaseProvider(); + message.owner = object.owner ?? ''; + message.hostUri = object.hostUri ?? ''; + message.attributes = + object.attributes?.map((e) => Attribute.fromPartial(e)) || []; + message.info = + object.info !== undefined && object.info !== null + ? 
ProviderInfo.fromPartial(object.info) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(Provider.$type, Provider); + +/** Msg defines the provider Msg service */ +export interface Msg { + /** CreateProvider defines a method that creates a provider given the proper inputs */ + CreateProvider( + request: MsgCreateProvider, + ): Promise; + /** UpdateProvider defines a method that updates a provider given the proper inputs */ + UpdateProvider( + request: MsgUpdateProvider, + ): Promise; + /** DeleteProvider defines a method that deletes a provider given the proper inputs */ + DeleteProvider( + request: MsgDeleteProvider, + ): Promise; +} + +export const MsgServiceName = 'akash.provider.v1beta4.Msg'; +export class MsgClientImpl implements Msg { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || MsgServiceName; + this.rpc = rpc; + this.CreateProvider = this.CreateProvider.bind(this); + this.UpdateProvider = this.UpdateProvider.bind(this); + this.DeleteProvider = this.DeleteProvider.bind(this); + } + CreateProvider( + request: MsgCreateProvider, + ): Promise { + const data = MsgCreateProvider.encode(request).finish(); + const promise = this.rpc.request(this.service, 'CreateProvider', data); + return promise.then((data) => + MsgCreateProviderResponse.decode(_m0.Reader.create(data)), + ); + } + + UpdateProvider( + request: MsgUpdateProvider, + ): Promise { + const data = MsgUpdateProvider.encode(request).finish(); + const promise = this.rpc.request(this.service, 'UpdateProvider', data); + return promise.then((data) => + MsgUpdateProviderResponse.decode(_m0.Reader.create(data)), + ); + } + + DeleteProvider( + request: MsgDeleteProvider, + ): Promise { + const data = MsgDeleteProvider.encode(request).finish(); + const promise = this.rpc.request(this.service, 'DeleteProvider', data); + return promise.then((data) => + 
MsgDeleteProviderResponse.decode(_m0.Reader.create(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? { [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/akash/provider/v1beta4/query.ts b/ts/src/generated/akash/provider/v1beta4/query.ts new file mode 100644 index 00000000..6be6f8c6 --- /dev/null +++ b/ts/src/generated/akash/provider/v1beta4/query.ts @@ -0,0 +1,443 @@ +/* eslint-disable */ +import Long from 'long'; +import _m0 from 'protobufjs/minimal'; +import { + PageRequest, + PageResponse, +} from '../../../cosmos/base/query/v1beta1/pagination'; +import { messageTypeRegistry } from '../../../typeRegistry'; +import { Provider } from './provider'; + +/** QueryProvidersRequest is request type for the Query/Providers RPC method */ +export interface QueryProvidersRequest { + $type: 'akash.provider.v1beta4.QueryProvidersRequest'; + pagination: PageRequest | undefined; +} + +/** QueryProvidersResponse is response type for the Query/Providers RPC method */ +export interface QueryProvidersResponse { + $type: 'akash.provider.v1beta4.QueryProvidersResponse'; + providers: Provider[]; + pagination: PageResponse | undefined; +} + +/** QueryProviderRequest is request type for the Query/Provider RPC method */ +export interface QueryProviderRequest { + $type: 'akash.provider.v1beta4.QueryProviderRequest'; + owner: string; +} + +/** QueryProviderResponse is response type for the Query/Provider RPC 
method */ +export interface QueryProviderResponse { + $type: 'akash.provider.v1beta4.QueryProviderResponse'; + provider: Provider | undefined; +} + +function createBaseQueryProvidersRequest(): QueryProvidersRequest { + return { + $type: 'akash.provider.v1beta4.QueryProvidersRequest', + pagination: undefined, + }; +} + +export const QueryProvidersRequest = { + $type: 'akash.provider.v1beta4.QueryProvidersRequest' as const, + + encode( + message: QueryProvidersRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.pagination !== undefined) { + PageRequest.encode(message.pagination, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProvidersRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProvidersRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.pagination = PageRequest.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProvidersRequest { + return { + $type: QueryProvidersRequest.$type, + pagination: isSet(object.pagination) + ? PageRequest.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProvidersRequest): unknown { + const obj: any = {}; + if (message.pagination !== undefined) { + obj.pagination = PageRequest.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryProvidersRequest { + return QueryProvidersRequest.fromPartial(base ?? 
{}); + }, + fromPartial( + object: DeepPartial, + ): QueryProvidersRequest { + const message = createBaseQueryProvidersRequest(); + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageRequest.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProvidersRequest.$type, QueryProvidersRequest); + +function createBaseQueryProvidersResponse(): QueryProvidersResponse { + return { + $type: 'akash.provider.v1beta4.QueryProvidersResponse', + providers: [], + pagination: undefined, + }; +} + +export const QueryProvidersResponse = { + $type: 'akash.provider.v1beta4.QueryProvidersResponse' as const, + + encode( + message: QueryProvidersResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + for (const v of message.providers) { + Provider.encode(v!, writer.uint32(10).fork()).ldelim(); + } + if (message.pagination !== undefined) { + PageResponse.encode( + message.pagination, + writer.uint32(18).fork(), + ).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProvidersResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? reader.len : reader.pos + length; + const message = createBaseQueryProvidersResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.providers.push(Provider.decode(reader, reader.uint32())); + continue; + case 2: + if (tag !== 18) { + break; + } + + message.pagination = PageResponse.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProvidersResponse { + return { + $type: QueryProvidersResponse.$type, + providers: globalThis.Array.isArray(object?.providers) + ? 
object.providers.map((e: any) => Provider.fromJSON(e)) + : [], + pagination: isSet(object.pagination) + ? PageResponse.fromJSON(object.pagination) + : undefined, + }; + }, + + toJSON(message: QueryProvidersResponse): unknown { + const obj: any = {}; + if (message.providers?.length) { + obj.providers = message.providers.map((e) => Provider.toJSON(e)); + } + if (message.pagination !== undefined) { + obj.pagination = PageResponse.toJSON(message.pagination); + } + return obj; + }, + + create(base?: DeepPartial): QueryProvidersResponse { + return QueryProvidersResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProvidersResponse { + const message = createBaseQueryProvidersResponse(); + message.providers = + object.providers?.map((e) => Provider.fromPartial(e)) || []; + message.pagination = + object.pagination !== undefined && object.pagination !== null + ? PageResponse.fromPartial(object.pagination) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProvidersResponse.$type, QueryProvidersResponse); + +function createBaseQueryProviderRequest(): QueryProviderRequest { + return { $type: 'akash.provider.v1beta4.QueryProviderRequest', owner: '' }; +} + +export const QueryProviderRequest = { + $type: 'akash.provider.v1beta4.QueryProviderRequest' as const, + + encode( + message: QueryProviderRequest, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.owner !== '') { + writer.uint32(10).string(message.owner); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProviderRequest { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryProviderRequest(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.owner = reader.string(); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderRequest { + return { + $type: QueryProviderRequest.$type, + owner: isSet(object.owner) ? globalThis.String(object.owner) : '', + }; + }, + + toJSON(message: QueryProviderRequest): unknown { + const obj: any = {}; + if (message.owner !== '') { + obj.owner = message.owner; + } + return obj; + }, + + create(base?: DeepPartial): QueryProviderRequest { + return QueryProviderRequest.fromPartial(base ?? {}); + }, + fromPartial(object: DeepPartial): QueryProviderRequest { + const message = createBaseQueryProviderRequest(); + message.owner = object.owner ?? ''; + return message; + }, +}; + +messageTypeRegistry.set(QueryProviderRequest.$type, QueryProviderRequest); + +function createBaseQueryProviderResponse(): QueryProviderResponse { + return { + $type: 'akash.provider.v1beta4.QueryProviderResponse', + provider: undefined, + }; +} + +export const QueryProviderResponse = { + $type: 'akash.provider.v1beta4.QueryProviderResponse' as const, + + encode( + message: QueryProviderResponse, + writer: _m0.Writer = _m0.Writer.create(), + ): _m0.Writer { + if (message.provider !== undefined) { + Provider.encode(message.provider, writer.uint32(10).fork()).ldelim(); + } + return writer; + }, + + decode( + input: _m0.Reader | Uint8Array, + length?: number, + ): QueryProviderResponse { + const reader = + input instanceof _m0.Reader ? input : _m0.Reader.create(input); + let end = length === undefined ? 
reader.len : reader.pos + length; + const message = createBaseQueryProviderResponse(); + while (reader.pos < end) { + const tag = reader.uint32(); + switch (tag >>> 3) { + case 1: + if (tag !== 10) { + break; + } + + message.provider = Provider.decode(reader, reader.uint32()); + continue; + } + if ((tag & 7) === 4 || tag === 0) { + break; + } + reader.skipType(tag & 7); + } + return message; + }, + + fromJSON(object: any): QueryProviderResponse { + return { + $type: QueryProviderResponse.$type, + provider: isSet(object.provider) + ? Provider.fromJSON(object.provider) + : undefined, + }; + }, + + toJSON(message: QueryProviderResponse): unknown { + const obj: any = {}; + if (message.provider !== undefined) { + obj.provider = Provider.toJSON(message.provider); + } + return obj; + }, + + create(base?: DeepPartial): QueryProviderResponse { + return QueryProviderResponse.fromPartial(base ?? {}); + }, + fromPartial( + object: DeepPartial, + ): QueryProviderResponse { + const message = createBaseQueryProviderResponse(); + message.provider = + object.provider !== undefined && object.provider !== null + ? 
Provider.fromPartial(object.provider) + : undefined; + return message; + }, +}; + +messageTypeRegistry.set(QueryProviderResponse.$type, QueryProviderResponse); + +/** Query defines the gRPC querier service */ +export interface Query { + /** Providers queries providers */ + Providers(request: QueryProvidersRequest): Promise; + /** Provider queries provider details */ + Provider(request: QueryProviderRequest): Promise; +} + +export const QueryServiceName = 'akash.provider.v1beta4.Query'; +export class QueryClientImpl implements Query { + private readonly rpc: Rpc; + private readonly service: string; + constructor(rpc: Rpc, opts?: { service?: string }) { + this.service = opts?.service || QueryServiceName; + this.rpc = rpc; + this.Providers = this.Providers.bind(this); + this.Provider = this.Provider.bind(this); + } + Providers(request: QueryProvidersRequest): Promise { + const data = QueryProvidersRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Providers', data); + return promise.then((data) => + QueryProvidersResponse.decode(_m0.Reader.create(data)), + ); + } + + Provider(request: QueryProviderRequest): Promise { + const data = QueryProviderRequest.encode(request).finish(); + const promise = this.rpc.request(this.service, 'Provider', data); + return promise.then((data) => + QueryProviderResponse.decode(_m0.Reader.create(data)), + ); + } +} + +interface Rpc { + request( + service: string, + method: string, + data: Uint8Array, + ): Promise; +} + +type Builtin = + | Date + | Function + | Uint8Array + | string + | number + | boolean + | undefined; + +type DeepPartial = T extends Builtin + ? T + : T extends Long + ? string | number | Long + : T extends globalThis.Array + ? globalThis.Array> + : T extends ReadonlyArray + ? ReadonlyArray> + : T extends {} + ? 
{ [K in Exclude]?: DeepPartial } + : Partial; + +if (_m0.util.Long !== Long) { + _m0.util.Long = Long as any; + _m0.configure(); +} + +function isSet(value: any): boolean { + return value !== null && value !== undefined; +} diff --git a/ts/src/generated/index.akash.audit.ts b/ts/src/generated/index.akash.audit.ts index 37c079ec..4c5c6449 100644 --- a/ts/src/generated/index.akash.audit.ts +++ b/ts/src/generated/index.akash.audit.ts @@ -1,3 +1,3 @@ /* eslint-disable */ -export * as v1beta3 from './index.akash.audit.v1beta3'; +export * as v1beta4 from './index.akash.audit.v1beta4'; diff --git a/ts/src/generated/index.akash.audit.v1beta4.ts b/ts/src/generated/index.akash.audit.v1beta4.ts new file mode 100644 index 00000000..7e3fa99c --- /dev/null +++ b/ts/src/generated/index.akash.audit.v1beta4.ts @@ -0,0 +1,5 @@ +/* eslint-disable */ + +export * from './akash/audit/v1beta4/audit'; +export * from './akash/audit/v1beta4/query'; +export * from './akash/audit/v1beta4/genesis'; diff --git a/ts/src/generated/index.akash.base.attributes.ts b/ts/src/generated/index.akash.base.attributes.ts new file mode 100644 index 00000000..931446b9 --- /dev/null +++ b/ts/src/generated/index.akash.base.attributes.ts @@ -0,0 +1,3 @@ +/* eslint-disable */ + +export * as v1 from './index.akash.base.attributes.v1'; diff --git a/ts/src/generated/index.akash.base.attributes.v1.ts b/ts/src/generated/index.akash.base.attributes.v1.ts new file mode 100644 index 00000000..58a80f84 --- /dev/null +++ b/ts/src/generated/index.akash.base.attributes.v1.ts @@ -0,0 +1,3 @@ +/* eslint-disable */ + +export * from './akash/base/attributes/v1/attribute'; diff --git a/ts/src/generated/index.akash.base.resources.ts b/ts/src/generated/index.akash.base.resources.ts new file mode 100644 index 00000000..59343f00 --- /dev/null +++ b/ts/src/generated/index.akash.base.resources.ts @@ -0,0 +1,3 @@ +/* eslint-disable */ + +export * as v1 from './index.akash.base.resources.v1'; diff --git 
a/ts/src/generated/index.akash.base.resources.v1.ts b/ts/src/generated/index.akash.base.resources.v1.ts new file mode 100644 index 00000000..bf1904f4 --- /dev/null +++ b/ts/src/generated/index.akash.base.resources.v1.ts @@ -0,0 +1,9 @@ +/* eslint-disable */ + +export * from './akash/base/resources/v1/resourcevalue'; +export * from './akash/base/resources/v1/cpu'; +export * from './akash/base/resources/v1/gpu'; +export * from './akash/base/resources/v1/memory'; +export * from './akash/base/resources/v1/storage'; +export * from './akash/base/resources/v1/endpoint'; +export * from './akash/base/resources/v1/resources'; diff --git a/ts/src/generated/index.akash.deployment.v1beta4.grpc-js.ts b/ts/src/generated/index.akash.deployment.v1beta4.grpc-js.ts new file mode 100644 index 00000000..6f74b5be --- /dev/null +++ b/ts/src/generated/index.akash.deployment.v1beta4.grpc-js.ts @@ -0,0 +1 @@ +export * from './akash/deployment/v1beta4/service.grpc-js'; diff --git a/ts/src/generated/index.akash.deployment.v1beta4.ts b/ts/src/generated/index.akash.deployment.v1beta4.ts new file mode 100644 index 00000000..72677db1 --- /dev/null +++ b/ts/src/generated/index.akash.deployment.v1beta4.ts @@ -0,0 +1,4 @@ +/* eslint-disable */ + +export * from './akash/deployment/v1beta4/resourceunit'; +export * from './akash/deployment/v1beta4/groupspec'; diff --git a/ts/src/generated/index.akash.manifest.v2beta3.grpc-js.ts b/ts/src/generated/index.akash.manifest.v2beta3.grpc-js.ts new file mode 100644 index 00000000..4a8ecc3d --- /dev/null +++ b/ts/src/generated/index.akash.manifest.v2beta3.grpc-js.ts @@ -0,0 +1 @@ +export * from './akash/manifest/v2beta3/service.grpc-js'; diff --git a/ts/src/generated/index.akash.manifest.v2beta3.ts b/ts/src/generated/index.akash.manifest.v2beta3.ts new file mode 100644 index 00000000..c4258e69 --- /dev/null +++ b/ts/src/generated/index.akash.manifest.v2beta3.ts @@ -0,0 +1,6 @@ +/* eslint-disable */ + +export * from './akash/manifest/v2beta3/httpoptions'; +export 
* from './akash/manifest/v2beta3/serviceexpose'; +export * from './akash/manifest/v2beta3/service'; +export * from './akash/manifest/v2beta3/group'; diff --git a/ts/src/generated/index.akash.market.v1beta5.grpc-js.ts b/ts/src/generated/index.akash.market.v1beta5.grpc-js.ts new file mode 100644 index 00000000..34d97e00 --- /dev/null +++ b/ts/src/generated/index.akash.market.v1beta5.grpc-js.ts @@ -0,0 +1 @@ +export * from './akash/market/v1beta5/service.grpc-js'; diff --git a/ts/src/generated/index.akash.market.v1beta5.ts b/ts/src/generated/index.akash.market.v1beta5.ts new file mode 100644 index 00000000..b2417c0d --- /dev/null +++ b/ts/src/generated/index.akash.market.v1beta5.ts @@ -0,0 +1,9 @@ +/* eslint-disable */ + +export * from './akash/market/v1beta5/order'; +export * from './akash/market/v1beta5/bid'; +export * from './akash/market/v1beta5/lease'; +export * from './akash/market/v1beta5/query'; +export * from './akash/market/v1beta5/service'; +export * from './akash/market/v1beta5/params'; +export * from './akash/market/v1beta5/genesis'; diff --git a/ts/src/generated/index.akash.provider.v1beta4.ts b/ts/src/generated/index.akash.provider.v1beta4.ts new file mode 100644 index 00000000..336fc53c --- /dev/null +++ b/ts/src/generated/index.akash.provider.v1beta4.ts @@ -0,0 +1,5 @@ +/* eslint-disable */ + +export * from './akash/provider/v1beta4/provider'; +export * from './akash/provider/v1beta4/query'; +export * from './akash/provider/v1beta4/genesis';